diff --git a/sys/contrib/zstd/CHANGELOG b/sys/contrib/zstd/CHANGELOG --- a/sys/contrib/zstd/CHANGELOG +++ b/sys/contrib/zstd/CHANGELOG @@ -1,3 +1,82 @@ +v1.5.5 (Apr 2023) +fix: fix rare corruption bug affecting the high compression mode, reported by @danlark1 (#3517, @terrelln) +perf: improve mid-level compression speed (#3529, #3533, #3543, @yoniko and #3552, @terrelln) +lib: deprecated bufferless block-level API (#3534) by @terrelln +cli: mmap large dictionaries to save memory, by @daniellerozenblit +cli: improve speed of --patch-from mode (~+50%) (#3545) by @daniellerozenblit +cli: improve i/o speed (~+10%) when processing lots of small files (#3479) by @felixhandte +cli: zstd no longer crashes when requested to write into write-protected directory (#3541) by @felixhandte +cli: fix decompression into block device using -o, reported by @georgmu (#3583) +build: fix zstd CLI compiled with lzma support but not zlib support (#3494) by @Hello71 +build: fix cmake does no longer require 3.18 as minimum version (#3510) by @kou +build: fix MSVC+ClangCL linking issue (#3569) by @tru +build: fix zstd-dll, version of zstd CLI that links to the dynamic library (#3496) by @yoniko +build: fix MSVC warnings (#3495) by @embg +doc: updated zstd specification to clarify corner cases, by @Cyan4973 +doc: document how to create fat binaries for macos (#3568) by @rickmark +misc: improve seekable format ingestion speed (~+100%) for very small chunk sizes (#3544) by @Cyan4973 +misc: tests/fullbench can benchmark multiple files (#3516) by @dloidolt + +v1.5.4 (Feb 2023) +perf: +20% faster huffman decompression for targets that can't compile x64 assembly (#3449, @terrelln) +perf: up to +10% faster streaming compression at levels 1-2 (#3114, @embg) +perf: +4-13% for levels 5-12 by optimizing function generation (#3295, @terrelln) +pref: +3-11% compression speed for `arm` target (#3199, #3164, #3145, #3141, #3138, @JunHe77 and #3139, #3160, @danlark1) +perf: +5-30% faster dictionary compression at levels 1-4 (#3086, #3114, #3152, @embg) +perf: +10-20% cold dict compression speed by prefetching CDict tables (#3177, @embg) +perf: +1% faster compression by removing a branch in ZSTD_fast_noDict (#3129, @felixhandte) +perf: Small compression ratio improvements in high compression mode (#2983, #3391, @Cyan4973 and #3285, #3302, @daniellerozenblit) +perf: small speed improvement by better detecting `STATIC_BMI2` for `clang` (#3080, @TocarIP) +perf: Improved streaming performance when `ZSTD_c_stableInBuffer` is set (#2974, @Cyan4973) +cli: Asynchronous I/O for improved cli speed (#2975, #2985, #3021, #3022, @yoniko) +cli: Change `zstdless` behavior to align with `zless` (#2909, @binhdvo) +cli: Keep original file if `-c` or `--stdout` is given (#3052, @dirkmueller) +cli: Keep original files when result is concatenated into a single output with `-o` (#3450, @Cyan4973) +cli: Preserve Permissions and Ownership of regular files (#3432, @felixhandte) +cli: Print zlib/lz4/lzma library versions with `-vv` (#3030, @terrelln) +cli: Print checksum value for single frame files with `-lv` (#3332, @Cyan4973) +cli: Print `dictID` when present with `-lv` (#3184, @htnhan) +cli: when `stderr` is *not* the console, disable status updates, but preserve final summary (#3458, @Cyan4973) +cli: support `--best` and `--no-name` in `gzip` compatibility mode (#3059, @dirkmueller) +cli: support for `posix` high resolution timer `clock_gettime()`, for improved benchmark accuracy (#3423, @Cyan4973) +cli: improved help/usage (`-h`, `-H`) formatting 
(#3094, @dirkmueller and #3385, @jonpalmisc) +cli: Fix better handling of bogus numeric values (#3268, @ctkhanhly) +cli: Fix input consists of multiple files _and_ `stdin` (#3222, @yoniko) +cli: Fix tiny files passthrough (#3215, @cgbur) +cli: Fix for `-r` on empty directory (#3027, @brailovich) +cli: Fix empty string as argument for `--output-dir-*` (#3220, @embg) +cli: Fix decompression memory usage reported by `-vv --long` (#3042, @u1f35c, and #3232, @zengyijing) +cli: Fix infinite loop when empty input is passed to trainer (#3081, @terrelln) +cli: Fix `--adapt` doesn't work when `--no-progress` is also set (#3354, @terrelln) +api: Support for Block-Level Sequence Producer (#3333, @embg) +api: Support for in-place decompression (#3432, @terrelln) +api: New `ZSTD_CCtx_setCParams()` function, set all parameters defined in a `ZSTD_compressionParameters` structure (#3403, @Cyan4973) +api: Streaming decompression detects incorrect header ID sooner (#3175, @Cyan4973) +api: Window size resizing optimization for edge case (#3345, @daniellerozenblit) +api: More accurate error codes for busy-loop scenarios (#3413, #3455, @Cyan4973) +api: Fix limit overflow in `compressBound` and `decompressBound` (#3362, #3373, Cyan4973) reported by @nigeltao +api: Deprecate several advanced experimental functions: streaming (#3408, @embg), copy (#3196, @mileshu) +bug: Fix corruption that rarely occurs in 32-bit mode with wlog=25 (#3361, @terrelln) +bug: Fix for block-splitter (#3033, @Cyan4973) +bug: Fixes for Sequence Compression API (#3023, #3040, @Cyan4973) +bug: Fix leaking thread handles on Windows (#3147, @animalize) +bug: Fix timing issues with cmake/meson builds (#3166, #3167, #3170, @Cyan4973) +build: Allow user to select legacy level for cmake (#3050, @shadchin) +build: Enable legacy support by default in cmake (#3079, @niamster) +build: Meson build script improvements (#3039, #3120, #3122, #3327, #3357, @eli-schwartz and #3276, @neheb) +build: Add aarch64 to supported architectures for zstd_trace (#3054, @ooosssososos) +build: support AIX architecture (#3219, @qiongsiwu) +build: Fix `ZSTD_LIB_MINIFY` build macro, which now reduces static library size by half (#3366, @terrelln) +build: Fix Windows issues with Multithreading translation layer (#3364, #3380, @yoniko) and ARM64 target (#3320, @cwoffenden) +build: Fix `cmake` script (#3382, #3392, @terrelln and #3252 @Tachi107 and #3167 @Cyan4973) +doc: Updated man page, providing more details for `--train` mode (#3112, @Cyan4973) +doc: Add decompressor errata document (#3092, @terrelln) +misc: Enable Intel CET (#2992, #2994, @hjl-tools) +misc: Fix `contrib/` seekable format (#3058, @yhoogstrate and #3346, @daniellerozenblit) +misc: Improve speed of the one-file library generator (#3241, @wahern and #3005, @cwoffenden) + +v1.5.3 (dev version, unpublished) + v1.5.2 (Jan, 2022) perf: Regain Minimal memset()-ing During Reuse of Compression Contexts (@Cyan4973, #2969) build: Build Zstd with `noexecstack` on All Architectures (@felixhandte, #2964) diff --git a/sys/contrib/zstd/CODE_OF_CONDUCT.md b/sys/contrib/zstd/CODE_OF_CONDUCT.md new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +Facebook has adopted a Code of Conduct that we expect project participants to adhere to. +Please read the [full text](https://code.fb.com/codeofconduct/) +so that you can understand what actions will and will not be tolerated. 
diff --git a/sys/contrib/zstd/CONTRIBUTING.md b/sys/contrib/zstd/CONTRIBUTING.md --- a/sys/contrib/zstd/CONTRIBUTING.md +++ b/sys/contrib/zstd/CONTRIBUTING.md @@ -7,7 +7,7 @@ or in their own feature branch. When they are deemed ready for a release, they are merged into "release". -As a consequences, all contributions must stage first through "dev" +As a consequence, all contributions must stage first through "dev" or their own feature branch. ## Pull Requests @@ -134,11 +134,11 @@ - Static analyzers are full of false positive. The signal to noise ratio is actually pretty low. - A good CI policy is "zero-warning tolerance". That means that all issues must be solved, including false positives. This quickly becomes a tedious workload. - Multiple static analyzers will feature multiple kind of false positives, sometimes applying to the same code but in different ways leading to : - + torteous code, trying to please multiple constraints, hurting readability and therefore maintenance. Sometimes, such complexity introduce other more subtle bugs, that are just out of scope of the analyzers. + + tortuous code, trying to please multiple constraints, hurting readability and therefore maintenance. Sometimes, such complexity introduce other more subtle bugs, that are just out of scope of the analyzers. + sometimes, these constraints are mutually exclusive : if one try to solve one, the other static analyzer will complain, they can't be both happy at the same time. - As if that was not enough, the list of false positives change with each version. It's hard enough to follow one static analyzer, but multiple ones with their own update agenda, this quickly becomes a massive velocity reducer. -This is different from running a static analyzer once in a while, looking at the output, and __cherry picking__ a few warnings that seem helpful, either because they detected a genuine risk of bug, or because it helps expressing the code in a way which is more readable or more difficult to misuse. These kind of reports can be useful, and are accepted. +This is different from running a static analyzer once in a while, looking at the output, and __cherry picking__ a few warnings that seem helpful, either because they detected a genuine risk of bug, or because it helps expressing the code in a way which is more readable or more difficult to misuse. These kinds of reports can be useful, and are accepted. ## Continuous Integration CI tests run every time a pull request (PR) is created or updated. The exact tests @@ -197,7 +197,7 @@ take the time to run extensive, long-duration, and potentially cross-(os, platform, process, etc) benchmarks on your end before submitting a PR. Of course, you will not be able to benchmark your changes on every single processor and os out there (and neither will we) but do that best -you can:) We've adding some things to think about when benchmarking below in the Benchmarking +you can:) We've added some things to think about when benchmarking below in the Benchmarking Performance section which might be helpful for you. 3. Optimizing performance for a certain OS, processor vendor, compiler, or network system is a perfectly legitimate thing to do as long as it does not harm the overall performance health of Zstd. @@ -273,7 +273,7 @@ benchmarking script works, you can check out programs/benchzstd.c For example: say you have made a change that you believe improves the speed of zstd level 1. 
The -very first thing you should use to asses whether you actually achieved any sort of improvement +very first thing you should use to assess whether you actually achieved any sort of improvement is `zstd -b`. You might try to do something like this. Note: you can use the `-i` option to specify a running time for your benchmark in seconds (default is 3 seconds). Usually, the longer the running time, the more stable your results will be. @@ -299,7 +299,7 @@ ### Profiling There are a number of great profilers out there. We're going to briefly mention how you can profile your code using `instruments` on mac, `perf` on linux and `visual studio profiler` -on windows. +on Windows. Say you have an idea for a change that you think will provide some good performance gains for level 1 compression on Zstd. Typically this means, you have identified a section of @@ -315,8 +315,8 @@ Most profilers (including the profilers discussed below) will generate a call graph of functions for you. Your goal will be to find your function of interest in this call graph -and then inspect the time spent inside of it. You might also want to to look at the -annotated assembly which most profilers will provide you with. +and then inspect the time spent inside of it. You might also want to look at the annotated +assembly which most profilers will provide you with. #### Instruments We will once again consider the scenario where you think you've identified a piece of code @@ -330,7 +330,7 @@ * You will want a benchmark that runs for at least a few seconds (5 seconds will usually be long enough). This way the profiler will have something to work with and you will have ample time to attach your profiler to this process:) - * I will just use benchzstd as my bencharmking script for this example: + * I will just use benchzstd as my benchmarmking script for this example: ``` $ zstd -b1 -i5 # this will run for 5 seconds ``` @@ -455,7 +455,7 @@ Any variable that can be `const` (aka. read-only) **must** be `const`. Any pointer which content will not be modified must be `const`. This property is then controlled at compiler level. - `const` variables are an important signal to readers that this variable isn’t modified. + `const` variables are an important signal to readers that this variable isn't modified. Conversely, non-const variables are a signal to readers to watch out for modifications later on in the function. * If a function must be inlined, mention it explicitly, using project's own portable macros, such as `FORCE_INLINE_ATTR`, diff --git a/sys/contrib/zstd/FREEBSD-Xlist b/sys/contrib/zstd/FREEBSD-Xlist --- a/sys/contrib/zstd/FREEBSD-Xlist +++ b/sys/contrib/zstd/FREEBSD-Xlist @@ -5,6 +5,7 @@ .github .gitignore .travis.yml +Package.swift build contrib lib/dll diff --git a/sys/contrib/zstd/LICENSE b/sys/contrib/zstd/LICENSE --- a/sys/contrib/zstd/LICENSE +++ b/sys/contrib/zstd/LICENSE @@ -2,7 +2,7 @@ For Zstandard software -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. +Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -14,9 +14,9 @@ this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. 
+ * Neither the name Facebook, nor Meta, nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED diff --git a/sys/contrib/zstd/Makefile b/sys/contrib/zstd/Makefile --- a/sys/contrib/zstd/Makefile +++ b/sys/contrib/zstd/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) 2015-2021, Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -123,6 +123,7 @@ $(MAKE) -C contrib/seekable_format/examples all $(MAKE) -C contrib/seekable_format/tests test $(MAKE) -C contrib/largeNbDicts all + $(MAKE) -C contrib/externalSequenceProducer all cd build/single_file_libs/ ; ./build_decoder_test.sh cd build/single_file_libs/ ; ./build_library_test.sh @@ -142,6 +143,7 @@ $(Q)$(MAKE) -C contrib/seekable_format/examples $@ > $(VOID) $(Q)$(MAKE) -C contrib/seekable_format/tests $@ > $(VOID) $(Q)$(MAKE) -C contrib/largeNbDicts $@ > $(VOID) + $(Q)$(MAKE) -C contrib/externalSequenceProducer $@ > $(VOID) $(Q)$(RM) zstd$(EXT) zstdmt$(EXT) tmp* $(Q)$(RM) -r lz4 @echo Cleaning completed @@ -157,7 +159,7 @@ HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) @@ -198,15 +200,15 @@ .PHONY: gcc5build gcc6build gcc7build clangbuild m32build armbuild aarch64build ppcbuild ppc64build gcc5build: clean gcc-5 -v - CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" gcc6build: clean gcc-6 -v - CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" gcc7build: clean gcc-7 -v - CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror" + CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror $(MOREFLAGS)" clangbuild: clean clang -v @@ -230,17 +232,17 @@ .PHONY: armfuzz aarch64fuzz ppcfuzz ppc64fuzz armfuzz: clean - CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest aarch64fuzz: clean ld -v - CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppcfuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest ppc64fuzz: clean - CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) fuzztest .PHONY: cxxtest 
gcc5test gcc6test armtest aarch64test ppctest ppc64test cxxtest: CXXFLAGS += -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror @@ -249,34 +251,34 @@ gcc5test: clean gcc-5 -v - $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror $(MOREFLAGS)" gcc6test: clean gcc-6 -v - $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror" + $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror $(MOREFLAGS)" armtest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" aarch64test: $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppctest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" ppc64test: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" .PHONY: arm-ppc-compilation arm-ppc-compilation: - $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" - $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" + $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static $(MOREFLAGS)" + $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static $(MOREFLAGS)" regressiontest: $(MAKE) -C $(FUZZDIR) regressiontest @@ -306,34 +308,36 @@ # run UBsan with -fsanitize-recover=pointer-overflow # this only works with recent compilers such as gcc 8+ usan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all 
-fsanitize-recover=pointer-overflow -fsanitize=undefined -Werror $(MOREFLAGS)" asan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror $(MOREFLAGS)" asan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* msan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason + $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason msan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer -Werror $(MOREFLAGS)" FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $* asan32: clean - $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address" + $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address $(MOREFLAGS)" uasan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror $(MOREFLAGS)" uasan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* tsan-%: clean - LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS=--no-big-tests + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" .PHONY: apt-install apt-install: + # TODO: uncomment once issue 3011 is resolved and remove hack from Github Actions .yml + # sudo apt-get update sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES) .PHONY: apt-add-repo @@ -400,7 +404,7 @@ c89build: clean $(CC) -v - CFLAGS="-std=c89 -Werror -O0" $(MAKE) allmost # will fail, due to missing support for `long long` + CFLAGS="-std=c89 -Werror -Wno-attributes -Wpedantic -Wno-long-long -Wno-variadic-macros -O0" $(MAKE) lib zstd gnu90build: clean $(CC) -v diff --git a/sys/contrib/zstd/README.md b/sys/contrib/zstd/README.md --- a/sys/contrib/zstd/README.md +++ b/sys/contrib/zstd/README.md @@ -8,20 +8,17 @@ This repository represents the reference implementation, provided as an open-source dual [BSD](LICENSE) and [GPLv2](COPYING) licensed **C** library, and a command line utility producing and decoding `.zst`, `.gz`, `.xz` and `.lz4` files. Should your project require another programming language, -a list of known ports and bindings is provided on [Zstandard homepage](http://www.zstd.net/#other-languages). 
+a list of known ports and bindings is provided on [Zstandard homepage](https://facebook.github.io/zstd/#other-languages). **Development branch status:** [![Build Status][travisDevBadge]][travisLink] -[![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] [![Build status][CirrusDevBadge]][CirrusLink] [![Fuzzing Status][OSSFuzzBadge]][OSSFuzzLink] [travisDevBadge]: https://api.travis-ci.com/facebook/zstd.svg?branch=dev "Continuous Integration test suite" [travisLink]: https://travis-ci.com/facebook/zstd -[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite" -[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0 [CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite" [CircleLink]: https://circleci.com/gh/facebook/zstd [CirrusDevBadge]: https://api.cirrus-ci.com/github/facebook/zstd.svg?branch=dev @@ -39,7 +36,7 @@ on the [Silesia compression corpus]. [lzbench]: https://github.com/inikep/lzbench -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia [gcc]: https://gcc.gnu.org/ | Compressor name | Ratio | Compression| Decompress.| @@ -56,8 +53,8 @@ | lzf 3.6 -1 | 2.077 | 410 MB/s | 830 MB/s | | snappy 1.1.9 | 2.073 | 550 MB/s | 1750 MB/s | -[zlib]: http://www.zlib.net/ -[lz4]: http://www.lz4.org/ +[zlib]: https://www.zlib.net/ +[lz4]: https://lz4.github.io/lz4/ The negative compression levels, specified with `--fast=#`, offer faster compression and decompression speed @@ -124,14 +121,27 @@ ## Build instructions +`make` is the officially maintained build system of this project. +All other build systems are "compatible" and 3rd-party maintained, +they may feature small differences in advanced options. +When your system allows it, prefer using `make` to build `zstd` and `libzstd`. + ### Makefile If your system is compatible with standard `make` (or `gmake`), invoking `make` in root directory will generate `zstd` cli in root directory. +It will also create `libzstd` into `lib/`. Other available options include: - `make install` : create and install zstd cli, library and man pages -- `make check` : create and run `zstd`, tests its behavior on local platform +- `make check` : create and run `zstd`, test its behavior on local platform + +The `Makefile` follows the [GNU Standard Makefile conventions](https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html), +allowing staged install, standard flags, directory variables and command variables. + +For advanced use cases, specialized compilation flags which control binary generation +are documented in [`lib/README.md`](lib/README.md#modular-build) for the `libzstd` library +and in [`programs/README.md`](programs/README.md#compilation-variables) for the `zstd` CLI. ### cmake @@ -141,6 +151,18 @@ By default, `CMAKE_BUILD_TYPE` is set to `Release`. +#### Support for Fat (Universal2) Output + +`zstd` can be built and installed with support for both Apple Silicon (M1/M2) as well as Intel by using CMake's Universal2 support. +To perform a Fat/Universal2 build and install use the following commands: + +```bash +cmake -B build-cmake-debug -S build/cmake -G Ninja -DCMAKE_OSX_ARCHITECTURES="x86_64;x86_64h;arm64" +cd build-cmake-debug +ninja +sudo ninja install +``` + ### Meson A Meson project is provided within [`build/meson`](build/meson). 
Follow @@ -178,13 +200,15 @@ ## Testing -You can run quick local smoke tests by executing the `playTest.sh` script from the `src/tests` directory. -Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the zstd and datagen binary. -For information on CI testing, please refer to TESTING.md +You can run quick local smoke tests by running `make check`. +If you can't use `make`, execute the `playTest.sh` script from the `src/tests` directory. +Two env variables `$ZSTD_BIN` and `$DATAGEN_BIN` are needed for the test script to locate the `zstd` and `datagen` binary. +For information on CI testing, please refer to `TESTING.md`. ## Status -Zstandard is currently deployed within Facebook. It is used continuously to compress large amounts of data in multiple formats and use cases. +Zstandard is currently deployed within Facebook and many other large cloud infrastructures. +It is run continuously to compress large amounts of data in multiple formats and use cases. Zstandard is considered safe for production environments. ## License diff --git a/sys/contrib/zstd/TESTING.md b/sys/contrib/zstd/TESTING.md --- a/sys/contrib/zstd/TESTING.md +++ b/sys/contrib/zstd/TESTING.md @@ -22,7 +22,7 @@ - `tests/playTests.sh --test-large-data` - Fuzzer tests: `tests/fuzzer.c`, `tests/zstreamtest.c`, and `tests/decodecorpus.c` - `tests/zstreamtest.c` under Tsan (streaming mode, including multithreaded mode) -- Valgrind Test (`make -C tests valgrindTest`) (testing CLI and fuzzer under valgrind) +- Valgrind Test (`make -C tests test-valgrind`) (testing CLI and fuzzer under `valgrind`) - Fuzzer tests (see above) on ARM, AArch64, PowerPC, and PowerPC64 Long Tests diff --git a/sys/contrib/zstd/doc/README.md b/sys/contrib/zstd/doc/README.md --- a/sys/contrib/zstd/doc/README.md +++ b/sys/contrib/zstd/doc/README.md @@ -5,8 +5,9 @@ as well as detailed instructions to use `zstd` library. __`zstd_manual.html`__ : Documentation of `zstd.h` API, in html format. -Click on this link: [http://zstd.net/zstd_manual.html](http://zstd.net/zstd_manual.html) -to display documentation of latest release in readable format within a browser. +Unfortunately, Github doesn't display `html` files in parsed format, just as source code. +For a readable display of html API documentation of latest release, +use this link: [https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html](https://raw.githack.com/facebook/zstd/release/doc/zstd_manual.html) . __`zstd_compression_format.md`__ : This document defines the Zstandard compression format. Compliant decoders must adhere to this document, diff --git a/sys/contrib/zstd/doc/decompressor_errata.md b/sys/contrib/zstd/doc/decompressor_errata.md new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/doc/decompressor_errata.md @@ -0,0 +1,84 @@ +Decompressor Errata +=================== + +This document captures known decompressor bugs, where the decompressor rejects a valid zstd frame. +Each entry will contain: +1. The last affected decompressor versions. +2. The decompressor components affected. +2. Whether the compressed frame could ever be produced by the reference compressor. +3. An example frame. +4. A description of the bug. + +The document is in reverse chronological order, with the bugs that affect the most recent zstd decompressor versions listed first. 
+ + +Compressed block with 0 literals and 0 sequences +------------------------------------------------ + +**Last affected version**: v1.5.2 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd 2000 1500 0000 00` + +The zstd decoder incorrectly rejected blocks of type `Compressed_Block` that encodes literals as `Raw_Literals_Block` with no literals, and has no sequences. + +This type of block was never generated by the reference compressor. + +Additionally, these blocks were disallowed by the spec up until spec version 0.3.2 when the restriction was lifted by [PR#1689](https://github.com/facebook/zstd/pull/1689). + +> A Compressed_Block has the extra restriction that Block_Size is always strictly less than the decompressed size. If this condition cannot be respected, the block must be sent uncompressed instead (Raw_Block). + +First block is RLE block +------------------------ + +**Last affected version**: v1.4.3 + +**Affected decompressor component(s)**: CLI only + +**Produced by the reference compressor**: No + +**Example Frame**: `28b5 2ffd a001 0002 0002 0010 000b 0000 00` + +The zstd CLI decompressor rejected cases where the first block was an RLE block whose `Block_Size` is 131072, and the frame contains more than one block. +This only affected the zstd CLI, and not the library. + +The example is an RLE block with 131072 bytes, followed by a second RLE block with 1 byte. + +The compressor currently works around this limitation by explicitly avoiding producing RLE blocks as the first +block. + +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L3527-L3535 + +Tiny FSE Table & Block +---------------------- + +**Last affected version**: v1.3.4 + +**Affected decompressor component(s)**: Library & CLI + +**Produced by the reference compressor**: Possibly until version v1.3.4, but probably never + +**Example Frame**: `28b5 2ffd 2027 c500 0080 f3f1 f0ec ebc6 c5c7 f09d 4300 0000 e0e0 0658 0100 603e 52` + +The zstd library rejected blocks of type `Compressed_Block` whose offset of the last table with type `FSE_Compressed_Mode` was less than 4 bytes from the end of the block. + +In more depth, let `Last_Table_Offset` be the offset in the compressed block (excluding the header) that +the last table with type `FSE_Compressed_Mode` started. If `Block_Content - Last_Table_Offset < 4` then +the buggy zstd decompressor would reject the block. This occurs when the last serialized table is 2 bytes +and the bitstream size is 1 byte. + +For example: +* There is 1 sequence in the block +* `Literals_Lengths_Mode` is `FSE_Compressed_Mode` & the serialized table size is 2 bytes +* `Offsets_Mode` is `Predefined_Mode` +* `Match_Lengths_Mode` is `Predefined_Mode` +* The bitstream is 1 byte. E.g. there is only one sequence and it fits in 1 byte. + +The total `Block_Content` is `5` bytes, and `Last_Table_Offset` is `2`. + +See the compressor workaround code: + +https://github.com/facebook/zstd/blob/8814aa5bfa74f05a86e55e9d508da177a893ceeb/lib/compress/zstd_compress.c#L2667-L2682 diff --git a/sys/contrib/zstd/doc/educational_decoder/Makefile b/sys/contrib/zstd/doc/educational_decoder/Makefile --- a/sys/contrib/zstd/doc/educational_decoder/Makefile +++ b/sys/contrib/zstd/doc/educational_decoder/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. 
and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/doc/educational_decoder/harness.c b/sys/contrib/zstd/doc/educational_decoder/harness.c --- a/sys/contrib/zstd/doc/educational_decoder/harness.c +++ b/sys/contrib/zstd/doc/educational_decoder/harness.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.h b/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.h --- a/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.h +++ b/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.c b/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.c --- a/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.c +++ b/sys/contrib/zstd/doc/educational_decoder/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -1890,7 +1890,7 @@ /// Initializes a Huffman table using canonical Huffman codes /// For more explanation on canonical Huffman codes see -/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html +/// https://www.cs.scranton.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html /// Codes within a level are allocated in symbol order (i.e. 
smaller symbols get /// earlier codes) static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits, diff --git a/sys/contrib/zstd/doc/images/zstd_logo86.png b/sys/contrib/zstd/doc/images/zstd_logo86.png index 216f228061c460c4e4725785486ebcff38804fbc..8abefe21b1e172b9683392057421984f64c8c9a4 GIT binary patch literal 13069 zc%1EdcQ~9|*Z1h6Bw7&NDA9%)z4sb52qJ>PVAL^2??jF05+s7?q9#g02!as3mk^@& zE;`>x&U;SI`#kUSe*b*e_5E??S~IiPTEF#MYp=ccea}s#j+P2B0Sy5F03cRVRn$d) z>!IJ;_*m%AOwVfr=r25!nx+!oA|4$n6H5&9Nd^EwtLLq6gwln2Fd>l6)^G-;v6;b=6oqrn9qQxopyP4Ys+JM<=j8ujbA;Lzu#2u>4EY*o8R(U zNQtlC#p%KJ9A-v%)7!Jb(1{ao%Zn3bZL?A5j}JNg88lc1_Rih1F5H+OXNWY9;NyKDO19CuPZ_a{E@`iZe>sQ}>v@WT&aI z7d&TnaxQfbbJxaYSLVr=^Owntz5bnFs${gIl6!B{@tx6h?au9Jo836B8U4t+Y#I?z z`?jRaW~XMlGrmlB?Vb3G%XHtZ zf1Kp}@oe5ElmQ5T&m2mx{OzuM=*3#~Cy&KB6C{AW}#jbe2|< zZ$2;0KEpga-#cYo7;ZQBhdOB>j*j2AbbWtat*`Qf%+VLTceGNab2y5FPkPux6jr3& z^vql=X~sTU2zz#{y>~_)WE_=9@#w{zbrK}?W`e?JgmZ)5r&EWnojv{(CE%Nz#;@Zu zTcso|KK8 z_(d{_c6_;Y>oI5CP7#q#-(9giP!2`9ZvKm7hES{cyVh(rc(7=qJaUVM?<$)Ye6Ano zyL0y$7eM|tJ)CZrjH#}7rp&B1W4%rjiybSvcNa~=hQbJnL?&WyJQ+K>p>={6YvMjF znII#!C`L!G`>E$Ru0;4wjd;grz}~EhPj9+);K)Xd|8aE&lq>-Ymad@VH=a+cfNb%N zUy)c%eIE+p5|P^J5!$RAN#)F{P8==DFSZ1;41R^hjHxZF+Qd$Vuw37%R^AekR&^MS z@e21X1IYB!I$WQfpfw!#8ESA=gI5bZ-g=$$DZl+y9e19XZvC2*-!bcJ@BO)>1P>aj z{$|_RZ$9JrHgV6?G);~CdfSpdr{+wnu;86;-s6mPpQTxwmv}IT>%m{)>ddo|hvchr z*CVdI%L=EW7_pv+o->nb18YD^c!cJN2sL_n_XBp<0}*#yenRprMKu$ zJpVFe`EgHRVpmxr+9Yi{{N(=V*Y_n7x+a0+*S|te;O8g-4ZW zRM}LHI*Kv>kk@%T>DqnIn%hcX%sHonoY!uP z(qE*i@+$0VGw+z6_^b>0*zF``cJR#7`Bf+v%Y`J~8Z)hF-f-OTHIdA(H=w&_nq9&Q zyu7gN!6E|Cre{?BNN7?iOYvxxa%WY!$IpFp<&?xWSpYzhbAyE^mvpgDS2v3XY^e%qb0r~kWIU@5frdV#?-Tk(=$Vx9``OVmE4|H>IL|#MHtFka z`lNU8yMD9ASAz^z{$<7sK|)&aN_-h?#yKPxV5f0?W~%~4fV3_bil(u)njOl0$c!gT zVxI06b}taMrQegq{F%XJAl&>hvc}7vxpP@h(97IGcRY7;@U*lNv#zNBsTf2+vMGaS?muAO!`l3 zP$b`pEZMC?d2_}O#oJ5ukPQvi6~KDB(9EgguO}~o zlE-i83?#nTuyG2KDMT@xy|Rz^Af_YKJa`_2)k?4h8Ff*vqlLK1!UMj-$t7QV0i+0p zzZi}OxP3V5PO3b`-rk$Ykc_#u^g>?+hEMd2D)Y=D{y4vrq_pvds+-s5Q()InR$}Vx zOMHi@KBw0Y@U6w1RiA&{H(ZgWOw7j8DrD0phs!I_Ne9qq0#siU za5Nd?YgxOvC2+YbN{ss)SM?QxuoLo-2^zxfV*9dx7(?e=M8aM@p{SvQACJF6QgY$?U56j3YUUSo^D--xTOEV?F!T#-}xF0TzPP z*!L1xQOQJ}s2sv!fMp0V-fqw;e0Nj(CUv4dL0WTm?2Y(_c3Rme0zE;jFA?*>h)t39 z$E=9tYFk7?VAAs-{B7|wy&J6&$~yHt)=Q-nGR6KD}9!_`xc)blSV?m8`munk11qq25)G5h$s88 zWGjJt3s>?NZ1wO3f-iuTndO45X8eGU*DsEi{rxZw@P9Z&cpoU1(28T0w1hnkFANotSIe(o`d?Fo$y;cg&g;0YX7<6m>n7^Hy1V*#$$@w*jfIjA< zoL?|gRaV$QP8FaX^ALk20W?HLfDf5jWg5LLlAzjD+V1K58nCU*Hmuhau+C^CGUx1gm&2NS`C7bFY=oG*7Yj-h7%POq^cOM@K9W&e)!CEQMf{g zewRQdkscF&hrW^_xmDn?a2j3AoLw@%m-h>VZ}Wf@;ycF!t9!Op9TM58B=}Aa~CU%fz>Ig{F8uAw37gkg8h18Gm*YyAqp=6xmh8OEQ>AeRfRS? 
z%HHrCHAu9V+HJ)LrXa-IIzkw{G#bMoL?P+e3G^d(+2`R*1TQ_(vpE$*)$$y zeJ2x0{-*3^f+1?CyadJ#uC@s(asJBDrEb5e;~xKmh+*{`9?pX)RrA{AIRAQo7)5{# z`zN_QVe-h$s95swUMg#~Nce|JptKS@-i7paWd|Hvrsj5G>Iz}y^35!>#%pg4LFIyC zOV3lkfWkf*N9&Zok6XT_Fh65i`$^B1gA(%*cCdcpLPtSf*`BI_)mae0TwPBO`PK}S z0Gec+d0b!b%r9{sdJ5Am zQ4BN^8=I#deJ2I#)7Gj)%9ha95g&Dge+9viesGqFk0`D{FS>*zodyYQ};D4;f;+Vxr39GI1`duWh2-}6|o zZzu)#P2SOtgP5d(SgD0p&k@r)VYjA);|Jr=H$bWAhM*zt#o%T-(`Vzvqj6O1H|Cxy zWH_!nnPHEgYWJyHhA4WT0C88&p$d^z3n4vi%_%1Mv-CNtgh^wLb`--|pRdgo+5qe# z2)mfOmDEJmzfm&-*ma9+E7feb%bDaqu+*b8^rQA4l$AoUuoh3Ap1-=s-xirt^2$lG zaw2l)j=|t*I>pS5l;uQ1DlX$S7Ots`h&3@6u7T0|5)HEa&-s%yFRj@*9zWonR8qSa z@+lpO<-s3X%T*y6%sfMR1M_t#u@;~5<0S6Hmzl0ZYI}4J$hx$;rj#n8%@0pQh^RCw zWC*d2I#_ESkG|9!wqBiJJuvU0iF_oFi_zb&%wm?5K)PLD=_MNq*}|}5EIDv5zcGA+ zfLQU{SUIhp*u`6-IFcaOAON?JXADD}w3>i~vB&GRp!Gm^)7a4^gV1C_K@6w-|du%z+unWU7C6c)&1!q_E(dstk^JmC=(IVybgAP5!bJj|VsUus@M5 zu*knU2R}(bU5nnr^h=0+qqWD&e;!0eLQ`Y$it5HvVgj}@fNd)MdI4;^R<)W}RK}2j z3DfRboc$Am?KQPE&$i0P_lIAx8x2=kD;f{JnPJ#0cCU)61G+f}$9Y%zTVXr0Cdn}i ze`eV^YM|521g+T6aM}lPCq0;Io}rOWOHlS`WWSwz)a5?#w4-CNC2OMX`)mB_F-4?a zeSElTs$AF07hQ-T+)BaVBcSHQoy67)!hU`3gKF($=kU~ii2?TR*1~%#BqvM{Xpg&Z zb}FbTKqwUF-vw*Jgk{#$rJiAuiU1c;jW`4&b^0;Z~MsRkIN9Kse>fHYcT^qVJNbDev{% z%6K(F)n@~Tdfc>$6=sXm6{hd(sc*0aMuQ{g?6mZh3}*2$lUYnz#@g*OQwViB%5i7m zw0UlZo-o;&;I4foyG?@=&=+L-1B*2ZgfNDziu`O1J zqF)cck>i8ug4sK7Mz*OaHs(wZ4+Z8bHr}04mXRiqN2LX*jArpZLj zg)<{#xaRNkCTWSG`YU9a({%BBQz)fVM!johrAI-q$7flCAN4;s*1`K3WLCl{bvU1C zIIG15uTQ!+bHBU`Wt7GCM8q!V@N4z5P^|Z0egIp|EfKS>R z;k&f02^>qT;wvc@0o#Sh&L26je>w;5LTH#CzRkDqpQ}bVT&!dp)4CJ#7Uin?B+V|j z$yN-WWoJ2XlqkaP#=!YR@kxDC3wJRG6t|zSFWLo$QPBQ);{Vm8i>)AH1rF0~>v)-y z&5_zjS8gckB5BWG9{G+iC)%JWCI!*CbHrvg{GzOdEAH{AcVp$#6hx}>$hLh!fTJd* z?F&xd7g&kL9|zCe+t1Y`3p1r;yy~#x-N}^$g-;H;U++#)erUlEvUJ&-p=B-F61wI;MQOw%m@>3876pPI})^hVnvZ zss8fj4EAz?IvEVqhTS!Rt-^;O$GRn7Nbd=t9jP?T+9S5YT^)W($neI^@mnJl8K=iU zXOFWK6x)O^{{fP5zFiPOi&%Zjo$XyvCzshU^iouk@CXIo}7~ z7>AqDB(7G?IQLXuk&wrB`rHncYtXk>4eZLUNT+ZfuV3+|Pp4il_RN6Fb$$%b5hkGn7)O(s%^1YRQ+ z9T)Au$Fkt&h;d@}@Q4j2rne^YBcPOP2=UG7Bl~94PhpfXV1q4K(Xyq1of!3FgNKAN zGpqWgq{VdzZZwua#B;g$MC1mQo!>Pm_Qd?>8LHqGG7=jgxNpD9fI{##e|^vO@^p*< z>?D0}{v(#b`X5I+GI7G+02O$Ey5t(Zu`F<--syg3aNh+o^0ZDf0JWmSQfoG`Z$&|= zOF(-)OK)xrxbFU0E|UOLL?yt!jL^8_Lu|p?3|LXx1P+vHedVs98@D*9tfX&d?G(?Q zVrAD0ydB%E9BJqEK_T=00#MzBz8F$jFZsoDFWfS(+C#cFoX`#Q-a-L*M7>=Yu_TX6 z?L`B08~_|yGN&0t$UgAB1@2UjPSH9hXl2}|q#|ZnNH*xCN6NNuYYMWuwj_D8qqccc zawcq7|4|xS&h=*aTA5rysRyklm2kr)ndgtiwc+%Ro;0mp0fdi_h z`HZMv&NE*^V{VmS7o|Rt;)M5E4HGaVW-_zr{NO~&6RLF7@_djbkvFAh$I`EQV`u!u znn1xl>Xuq;lrM#7>CuRiF36TAiA_owDvXMD6ZzuH!SQkCf#F!9icMb{Y$O3pX}FX; zA-AdX)y<{^@_n53OKi1yyk>*g*u5b|SC#|PcfeB*hgr-ODiy8nZ5RoC@MkVEpS<>= zS$1x51`ifo$vW$s2R$D-^%8sA2g74v0F4%hv1idX5%4swmj$2J-UK|xgphY+0=3<3 zeIeb|C8h;C_{J9)J-Q7(JPZ;7x+nlA+zzvspo14i{w$G=;(J9r{7OPe-(|L5$dT=NHTC^gafTcgBy+QitacwR5 z6Q1ILut^z)sp<2X!#SS3^fviZBHc*Bz}mPB-c@RhKDSB%vwB5&BXRaj;<2^UCY_vv z>ebUn04PXuIccPYhD6~#@rNfYj1L*^5QRe8H~{iW_FU!4%+E;xxGLNyX2FCH!2U&K z_`B8a^ij5eWMhmQlbZI^6TMBbGuEUb)hJH@^DA#0xJIX?4%WhOc5$KxiGmB^L)@V( zf_{N=GB>uP{-(g|{EYPETbS#;IyA(@gctk?4U7MvZGozU3`$2n`9xg;x(df zEi$Tij3kygMUm;vxp@R9s`<^VX*Dqp@^_Tq=2|xh(eRLBRSrJ*F2t)(2A5~^Z5ilQ z!J8ZrLD-^lO{I(I*GfW;L_}8 zai>pl1Q3}EtMNnjS5^_33@0mYq~@ho<%fjfFT^}G2|K`igUOnoR>`C%`6M&9Ja-R| z{PvSi7~#xB<7{Azfs|7)c(J1$+j_X)4l^5R-&6OC(gK_HkaAhrV2wc0jSnL9!#ONUx=Yp1zMjL3(E>QgPr7ZFqjrxJRvMWOAEfEv%MF%P|7JSHv z!Y-D`B^|SN0jf zoNKD)Y{fLI_wwEtNN{T2(nVGGlvz8b5KlGl9 zc_+W<0A$EYcA`E3zcIc1R*C94ZGjpjQOvr|LPB^|&)VI_7esn+!p|iI9CGybTA&tL zeQikEx399uv`JNN-plq4XoH3BQQB0Qz`A{Qslt#GU-(Di%5jarr%dvJ(Oh>pKFLQL 
zJfR@xoQU-~3a9(1%A0D!21fE^V%|)c!@CTbo=Q9JxF*6$fhTU}aB7K-zlr%I%`t%S ztNR=_z9g2Rai>`!mOw_%sIl=r{_v>`X0zaCP!)I!4YHFkY+$V2*`i2%ZG6h(cyAJ@|ID+dNp^P*& z#4Vj2d7)O$7BF5HfLhwaP)rsu8@Q7caI5YkkO^)j1vC`W1ZyG`V772o zZzN35TT9>4+ul;l3W$CNlk^lv6F9<9P$o}D2Pao?PbuI}UUBsJ%FG93`UyeVO972E zb(j>KkuWAfUO`?kNXZlK&JUC(V3I^yS&Qo`D*r)&)}(;8C=^1RkI%!ygV#fV*BNQU z2N4q!;{)^a@$-Yw2#~9n6AJ1Ha&l$4qWHz32y?YW!VxIAvlG*m4z+N0LrDRFXg|{* zbL>G$&@bQDeq98Coh+mBF_jt5dQ}fSgC)Yo^i0&t! zCltX4;RW+KI{q!f6{X~k`p*%r`d$bapDxVR*$rt4Q*wtnp;&&WKsdO${?6d$3cE7> ztlPoLnh#ynPoCdnRMa$e{?Waz= zO(hByg@_1Q34z35LLz7?5zDJ;tUwSeQDO8zh#w*V{YiDTY~pe{YEnRcUhrQQ9S11N z+8OC61=N5$xq1HO(1$z1^ia^Nra^>-#K8PwLPA1<5U?=6$X_4>7}6DeCayRkU|xR6 zPmh(QxH6g%ir$-WN2m>q58-6<({Z(m;yQ3on1hibn%(JFC`4Ek{NLR7e|Lk0ez~s} z^{N%<20&52T>sT0aRnp{igHHkJ3Bi_0k0}!x+>`B`DK#)<7A3!I9vV<{0W9x{dE31 z-ttfzzF%0r{{;RQCOunc52yb-p1+`fvdAG(9?nR6ZKSq^9n2E-pLzZr_)jKX^o7+G zh4fPUUnce6aFV~amnz!kjP&{~R1fCzOZ%ldz<()BKd(6AP|IKKcZIsctbl)WS=vIK zY+&e1JKx{!@PBg%zycN!0SE+!J_ObRAoR`>0$IRBtwAt|6&NOJAq0V13;vzO)!7>5 z0Y$>(Y|wiEz0T;<_;a0^IDYP3&cCBQY+>jkL11BVFc`@9$L#oipPnS&zcxVf>iQ?H zcC~Ny+zXM`8}$lIc{J)Hhy`hNrb#h?MV zM4zAknEQ9gueSVlle@Zi^8Io1MBj_~{=6Cgu`T`|zy3+`Pm+I<{FCILB>yD&C&~X; zNeKRWoQFA~f9QFjAJ)~WL*Ahu^sgaQ@4Es3gydIm3_#ioY5)M^NJm3o>FUn_mzV!W zc6R}Ldw~6Yz`+6F#}B~aAz*tOu(NYzM;m|H(elgkC-|3&9;3m(@V_j7j?wa`YikRz zwR0ugyMM_q%aw5lu(=J`+P)fZZe5N4P=CtC)?dbr%`5c}+5AJ+HvsFKf5=~!-(+q5 z4_RINYw$}}R{xOYmA}S6)yvDvor{a@i;La!^PThaxwEs`v$NrolcAH7w&Ua0&e>$apnIO_K-bt3l(I?e^j0B zIZE;}P4mUWh}jGLQfY#Cx_s0qX}`&xDu%mLSriHkz&AI~QsO5Mr1A(PmI;W$UeBXQ zH!03^EIWivg8SJ3nPtz|n#MM(vUomuCQbn&CW0BG#bZI~IGvYiAVAy zq-GO{pFa3y@zFtBd3~=_D4|>)cb{&Bna7UJ(z|Ic98~e6Zy{m2stE`8dt9ciL_(@k z5?(e@-Q6E9hXuWjN?<)(cCyDxd)rk~k7Yy1*7Q)@-hVN_rtQ+yzl@CJ=1$ys@B;}I zxw4BnAIr?cTf`M{H`NgC?|a7&79Lat7RACke$395O?umIAOkYWN{ro~`GSe&KGl2( z69$hRayC}j->L(CnFc*v@Rrw^?>a8KbM5(?H_z=j*&Ys$>?XU4d4aYCLla^YUcZhg z%zX(Qbbl9rz-gF+VSLz8$z%?CV$^Q*tV2*gnAyGvL{C-_J3O$PM;N`qFl`p#FM7`D zY-kOdlMaDA<~eOTif7NSrLyhu%lFG;`sx6-3jkkG2zPk4k6rpdrP~b7_Utom5Aaag ziFmvSdEyw6Z351B`!4eb|PdU5Y755O-xpk4Mlq8l>^4Q@KRixji z8159~*FDoywLAhns(e)0CZo);*4`R@8z0L3o+f}$t}uCi0W;W3{MiRbc@mM&@r-*J zpK25o+A|>e-+Sj;=ci{sZ-3uL5Lk>Y;vw?2G_xC@nuUH##G2zwa*=QfVYzr-Cy0d; zsNQ*-GM7l>S_OqoCg#k&KEZ0_8-d|_@#fS_003Utj&hOmaMjhtTTMwzu~gnX`2PSl Ci_^6L literal 5963 zc${^Y1yqzx+sBvgknToMkX^b%Sh_ovTo8$6X{1ZKB&AD05F}ihr9-+)T#%CP6oC)l z=Xt+;zBy;+o|)_ayJqIP=G^B@l$M4f0WJ+L001CRRs!ohe03kHCN}0nn_hCg0s!Ea z+snymDa*+*X}Lme?HwTifKpUy8kU}}F-`xWFR)FEB+^|L8sd(s^tk|gvQt4RD2zmo zDKav8U*R|NSX9F+25KEWs z%Wc5RY-vEs%Xp;}OfEno&oD{EwGW@Q)mp|?KoEw24A$eI-XFHn60&lk02=-^^R@05 z0QLBWVGj7+?d3g9bZd_q4?v4^5T{kmS!|GF+;KCy#~8qr{N@c;EjYOq1H#U`$TW!c zSkx!elta`qv!!$s9OhHNjS(dC!=>jdAW?B#j6W+Yc1Meo(-hYSm3i<~+jHYYR3hb6 zD>o|_;rDJ=OGx92BWJz+)#CvH(QF%P%|pmjY-EUPOk&_4el#Y@&!kk6H;@LiD$%^c=fLq>;LjdPh|K-r_oC_873y&{I)vNkul z2T?0oVmErxw=rCm+M95cx6iCFO9Y(wrf5K^Ft{00D*?$Y{4&09H}%`*e0a}gq0V@V z(ebUW2ueYnSV8S6N&yqLQ4yo61=09-TlP&Gb)j9j*%uvdrA<3t$?kqqjI3{Dy9vwW=BV9~`KNf*q64lMK_Vqz4lPMr=6zeK_w3ycA@U71ImD5f2R zS~wRWxA}y0DE%^cA_SwMsw*^G7+xK$Oc^d!DPiRk8aQ4qn(mgb>I z4>1kr>fP@X{ILl0Z$w$5Vn{IT&HC};h}oM^3eP-jIb1qcI-)&_u_urv5oZzIAq2=A z&%{PXJctv4ZHFU_BNqlMM4brQ0^>8EL~=!CMPWrc_0aY_UlmbKvH`m?%O~n6?JAKM zD@@8yT2G4ez5k@@$p&P@8@OJ_J<&daxJqC1hb!=Y@Ybf#WlK>{2~F`FL>m-;n)+JF zqqI-Et}LtUd>U`MbXwEZT@+8Wo=(%b(k#`iZb`jq?gZ?E=0kM-;iTzg>+_Ly<;gs4G_ZX;XKh+j1oGub5SJSQ6kJlO1uF{bz;)XEc%VB0E_0129 z^feA$@hQ5+_hdR_7w zZ^oc(U%RDDsIaQ|;M1r+&_KH6P77@oG=n{5^r6PQ$kbX*S9MBLx4235G-NZ9LT=~< zm%KyEtZ}8KQ*(Vqy^L?mh3I~|8l(h^KhC&w&nAB7)z;OnlKaK>*;ec>&9=f0#g@>v 
z)CB8x&Dp!N`dyE$xE+UGtlgj6!dub;Of=Q_SD4}i&G=F@G}H>T&LBr#Kg(jP9-%?O z*Pu6!YJ#5pqo68Ygi{E=<@@5yJ)v#EaP@t)>k|DE;b9AYU;f|)=Q>2wd{ewzxSPW! zlD{;=H^VMVIOEb3#2vzo%WcB_a%?J(QW(11TII9+Y@d6Io5)D2w$K>Eh};Nn+y)nd zZ<-hyFV`0=UM+rt+rxdV9c;5+we(*g=)d#KJBPlA_%7^=PkA@uqR#_`M{(a(ydr5+kA%qx&rr=5o_=t;Ujb8QZd34SeCgtjGXlFH%xU zAWtbzC9f3GjQENmKokXd22}iBysfzlzB9c&tPxx=^OW=z=wP<+kg;Q82T4@(9T_Lu zCyJCkqaq~4MpFFt4(>$#5t2qs+(o!Ral$suDZqNx5Z)MGfLB2wMO247gtv=Z8Ue(U zCc9(q;ySqB+O?Uz?;RuC z?LyUp{z9GsxKbpyD1&u)`=x!e>)K#}+47RPfu#Y=&|`gjy>e*Re81++GR-UXuFQD& z8wb2O!p)nFxk}bb)fZlE9kuzNR&IZ7ojZkRhF^!HNnhQLYq1%!@iaGd`g9{1d8Kg! z_7+~%!RGo7ds|m4*wffI*v(a&J{Shp{`$CD8l|qB^+xC$6j_JSAm6xgV9-$M0f~k* zLD0U1GzdcNoPo%@fc{!~!$GH^%LBIkl)V%Kbp0;}lgn_(+?-0lM$Xf*@{-qaJ8?6N zUZLdCQ1Rn(oY#+s504n>@xyvGH@f-Sl53@WpqFP^@sQ z4<42tIp>7u=Me&gPR>EJYy{mAl#BYy93+UH8*!rFSUZ5BOs1WJ}CzVdG?*OT$p(1>}WO zlWkyS)3>Udb&r-^4&OcBOsL4xt9_3>r`@b&uZxmzBc~0m&=<`k2><=j5OIGm)m^=I1`NTJQ`sJIF<(Bpv{=M_E{!qM6wm&yO=^k`fbCtYP+C27Y?1gz@JAT~ftLS@^;ykL4)Odr?|5ToM330D$Y! zR`Q5sW{R<%zkj<~EEw&ARUH?{egEjave4|53E8xzm9)BA>l8X*8M8z9xTojpASw!4 z*00C_7|-S1|61BQ;sGdJ*Os30i2@E|-ID}jp--Zf>@{;D002}>dp#p}BXu<~Yp4^C zr47^y!sG4a0{BxTyu}{6P7rrXCT}N4XE!l#NtS;p#2)&8rhzO>|B|>nNU|8IYca_| zT_H?@JOVr*7AagNCMF428(T3Qu)^Q?!$^|lmAku(7!c^?<;CO0&jWR}1M-TBiUL7= zKt8@_4~l1QKF;oz-p`!fSpRbJZ$Ds&o3*RGi@QD4nd#5i(hBO~F3H03x8|?%*Fq3) z`~O(%|ic;*v!5vm6BxLj&JJl(o$@uv z7J+tSLREXe8^+FLYZtY`2<4ZlGq^SoO9;=(OBhBIZ#+iC*VQpP(aEgMNY$L6CZ$=Q z`kv;t)g>yd9colkt(a!r@J{pr)83>FQ85^Csr6QmY$?a zS;Z&OCLmA8Zn{NK#@NvKQcHk;#fp(RR*)3443-e_bHVjqo>bBRb39a>!XdInBnx~d z(k`2J7$`VT;4tf5ncZN@V%t=Qa-NE(#wP+;_?=(P6Rs}qqkgeM4wp#58y4@m0}Ho> zSK}0;)#=!R%u_mpZ+%H0^?qkl9?oPMMt?jpB8U6Pm(`M$!2vq<Nb3&8HlD zI05pkvE%m>xlJx|H88DV8TPsRP(lVNPpLLbM$nOx0)-}RBw{bkGjOIVd0s=+nIq)7 zUgU%p>xN&u;|y%^vvfYr2=v00-PE*19<^e;k|XHG9@nccTECJHrAO2(WA92QF#hx;`Mg@@sKQ(y)`z7 z&EiJgouxdvqkG&OreQl~t$^n;>?f@t@~NO;l(JX}^gOV#8wa#C(jDPT zNkqSspsCahChZbi_0;~#s~yC^T>Wq_WyooGqIz3mkuOuHZ81-iNiJaci9uh|gId5H zFLmVbKciSk&onBbH-S}N0xki9gO<_6*@y4a?5p-#2(jI1zM?{caD-8<$kYwaF$h+r z`XZ7kKEh-0R!llDi~yr5TSo53D(Bnp1~O5$YHqc4a>xe1=(2fCMw5@8rfv;Y=m9#= z-)^o51PShRcvJ5AD?+*myzfq4i&4=xXjr&bQU!X2^ev>C;O*8tLErSv{H@p+fUIq*UE7+vsSmgwhLAm zQ?NCXA{klZB@IYpbuqNNJ3b;*h|&B?X4PcRxf!GpLy62FsG*Ez+5$U9QxUS^2MhfZ z3+E}=J9pN64Q+9SXIz~h-dgk8)Ze7S6C^GR_@uH$CTNFgUX!7oP6pl`OQlma<#Q5W zr&BJEfwYRSgsXNU2$O9G~HJKwxZZ3fcpJoboL@xSY0dXaLAEMrWcAma{mZGPa@LMR_eqz?)6 zLElN24}ykR(G}lBV@i{rn|6Sy8qO$h^$M}M{z`n$or>g?Rv-2B?U^xk571s#G2VxS zA+iC5a&$yidwiyIZ1VY_r0R#}J`)m3R5wJuC#A?DQvH1l6z0Fi8qY26j&77LLrC60 zseEkJ!xklbf;Uq-UksM4^Gikno824(Up?Pe|J zwx>)BI@1tJW#i2~4v+5)fVEepvK}t}%znlcdLzROdXKLSp7>R`QfO=M!r(TlYBQKV zRCv$i(*ADRab{V$z*_H~>JxQ5DH6(=PL1y-SM@0Q$D9{|EvpP4B_aaGhl#6kqn#hX z9HgEx;1`D&>4u*zV<%A&qAE)#Wh=w?zNB)|Qgd90>#~=Wa+SlBUG@`J=NZutT;U!% z>d!A@iX$C(+m$r2Pk%!aSvuECs8vWvyPDX-Bq~N7ZOllBoCW>QxL>{_Pf!^KM~l87 zc5B8YH7ClMOWr1SssN*W&Fn&PbNZl*VQZIJzyVy|U-D^@rRxp(YKrFV+u^(e4r=Ya z#>wajQKn3HNN&px3;c=hJ4qe2SIiMe+PU6&@qKCG*sVL&vrUhTlF|>ShKNo};w~!xobZ+tHtaqxs^s_-5z6 zO4LZ;=2tYWT;JPs^Ur{L?^~-%HE_VGo}$g+p^Y)r8HBURIBRM^Xb`&?9ai9@v}`E=|8hsh)h&zgSa^P3QCo}hi>t4IBa;`C;{OFpP7n?-U z$D>FOTaAB>>kc9)qw^^O%s8}O<(Aq6rUt}Skkq%^>4) + (Literals_Section_Header[1]<<4) + (Literals_Section_Header[2]<<12)` Only Stream1 is present for these cases. -Note : it's allowed to represent a short value (for example `13`) +Note : it's allowed to represent a short value (for example `27`) using a long format, even if it's less efficient. 
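As an illustration of the size formats quoted above, here is a hedged C sketch (not part of this patch or of upstream's sources; the function name is invented) that decodes `Regenerated_Size` for a `Raw_Literals_Block` or `RLE_Literals_Block` from the `Size_Format` bits (bits 2-3 of the first header byte):

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch: decode Regenerated_Size of a Raw_Literals_Block / RLE_Literals_Block.
 * Returns the number of header bytes consumed, or 0 if the input is too short. */
static size_t decode_raw_literals_size(const uint8_t* hdr, size_t hdrSize,
                                       uint32_t* regeneratedSize)
{
    uint32_t sizeFormat;
    if (hdrSize < 1) return 0;
    sizeFormat = (hdr[0] >> 2) & 3;               /* bits 2-3 of the first byte */
    if (sizeFormat == 0 || sizeFormat == 2) {     /* 1-byte header, 5-bit size */
        *regeneratedSize = hdr[0] >> 3;
        return 1;
    }
    if (sizeFormat == 1) {                        /* 2-byte header, 12-bit size */
        if (hdrSize < 2) return 0;
        *regeneratedSize = (hdr[0] >> 4) + ((uint32_t)hdr[1] << 4);
        return 2;
    }
    /* sizeFormat == 3 : 3-byte header, 20-bit size, the formula quoted above */
    if (hdrSize < 3) return 0;
    *regeneratedSize = (hdr[0] >> 4) + ((uint32_t)hdr[1] << 4) + ((uint32_t)hdr[2] << 12);
    return 3;
}
```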
__`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ : @@ -521,18 +522,37 @@ Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 01 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). + Both `Regenerated_Size` and `Compressed_Size` use 10 bits (6-1023). `Literals_Section_Header` uses 3 bytes. - `Size_Format` == 10 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 14 bits (0-16383). + Both `Regenerated_Size` and `Compressed_Size` use 14 bits (6-16383). `Literals_Section_Header` uses 4 bytes. - `Size_Format` == 11 : 4 streams. - Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143). + Both `Regenerated_Size` and `Compressed_Size` use 18 bits (6-262143). `Literals_Section_Header` uses 5 bytes. Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention. Note: `Compressed_Size` __includes__ the size of the Huffman Tree description _when_ it is present. +Note 2: `Compressed_Size` can never be `==0`. +Even in single-stream scenario, assuming an empty content, it must be `>=1`, +since it contains at least the final end bit flag. +In 4-streams scenario, a valid `Compressed_Size` is necessarily `>= 10` +(6 bytes for the jump table, + 4x1 bytes for the 4 streams). + +4 streams is faster than 1 stream in decompression speed, +by exploiting instruction level parallelism. +But it's also more expensive, +costing on average ~7.3 bytes more than the 1 stream mode, mostly from the jump table. + +In general, use the 4 streams mode when there are more literals to decode, +to favor higher decompression speeds. +Note that beyond >1KB of literals, the 4 streams mode is compulsory. + +Note that a minimum of 6 bytes is required for the 4 streams mode. +That's a technical minimum, but it's not recommended to employ the 4 streams mode +for such a small quantity, that would be wasteful. +A more practical lower bound would be around ~256 bytes. #### Raw Literals Block The data in Stream1 is `Regenerated_Size` bytes long, @@ -552,6 +572,7 @@ ### `Huffman_Tree_Description` This section is only present when `Literals_Block_Type` type is `Compressed_Literals_Block` (`2`). +The tree describes the weights of all literals symbols that can be present in the literals block, at least 2 and up to 256. The format of the Huffman tree description can be found at [Huffman Tree description](#huffman-tree-description). The size of `Huffman_Tree_Description` is determined during decoding process, it must be used to determine where streams begin. @@ -561,10 +582,10 @@ ### Jump Table The Jump Table is only present when there are 4 Huffman-coded streams. -Reminder : Huffman compressed data consists of either 1 or 4 Huffman-coded streams. +Reminder : Huffman compressed data consists of either 1 or 4 streams. If only one stream is present, it is a single bitstream occupying the entire -remaining portion of the literals block, encoded as described within +remaining portion of the literals block, encoded as described in [Huffman-Coded Streams](#huffman-coded-streams). If there are four streams, `Literals_Section_Header` only provided @@ -575,17 +596,18 @@ to reach a total decompressed size as specified in `Regenerated_Size`. The compressed size of each stream is provided explicitly in the Jump Table. 
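Since this part of the spec walks through the 4-streams layout, here is a small hedged C sketch of how a decoder could split `Total_Streams_Size` using the 6-byte Jump Table (three 2-byte little-endian fields). The function name is invented; the exact `Stream4_Size` formula and corruption check are the ones restated just below.

```c
#include <stdint.h>

/* Sketch: split Total_Streams_Size into the four Huffman-coded stream sizes
 * using the 6-byte Jump Table. Returns 0 on success, -1 on corrupted input. */
static int split_four_streams(const uint8_t* src, uint32_t totalStreamsSize,
                              uint32_t streamSizes[4])
{
    uint32_t s1, s2, s3;
    if (totalStreamsSize < 6 + 4) return -1;        /* 6-byte jump table + at least 1 byte per stream */
    s1 = src[0] | ((uint32_t)src[1] << 8);          /* three 2-byte little-endian fields */
    s2 = src[2] | ((uint32_t)src[3] << 8);
    s3 = src[4] | ((uint32_t)src[5] << 8);
    if ((uint64_t)6 + s1 + s2 + s3 + 1 > totalStreamsSize) return -1;   /* Stream4_Size must be >= 1 */
    streamSizes[0] = s1;
    streamSizes[1] = s2;
    streamSizes[2] = s3;
    streamSizes[3] = totalStreamsSize - 6 - s1 - s2 - s3;
    return 0;
}
```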
-Jump Table is 6 bytes long, and consist of three 2-byte __little-endian__ fields, +Jump Table is 6 bytes long, and consists of three 2-byte __little-endian__ fields, describing the compressed sizes of the first three streams. -`Stream4_Size` is computed from total `Total_Streams_Size` minus sizes of other streams. +`Stream4_Size` is computed from `Total_Streams_Size` minus sizes of other streams: `Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size`. -Note: if `Stream1_Size + Stream2_Size + Stream3_Size > Total_Streams_Size`, +`Stream4_Size` is necessarily `>= 1`. Therefore, +if `Total_Streams_Size < Stream1_Size + Stream2_Size + Stream3_Size + 6 + 1`, data is considered corrupted. Each of these 4 bitstreams is then decoded independently as a Huffman-Coded stream, -as described at [Huffman-Coded Streams](#huffman-coded-streams) +as described in [Huffman-Coded Streams](#huffman-coded-streams) Sequences Section @@ -945,14 +967,14 @@ |:--------------:|:-----------------:|:------------------:|:------------------:|:------------------:|:-----------------------:| | | | 1 | 4 | 8 | starting values | | 1114 | 11 | 1111 | 1 | 4 | non-repeat | -| 1 | 22 | 1111 | 1 | 4 | repeat 1; no change | +| 1 | 22 | 1111 | 1 | 4 | repeat 1: no change | | 2225 | 22 | 2222 | 1111 | 1 | non-repeat | | 1114 | 111 | 1111 | 2222 | 1111 | non-repeat | | 3336 | 33 | 3333 | 1111 | 2222 | non-repeat | -| 2 | 22 | 1111 | 3333 | 2222 | repeat 2; swap 1 & 2 | -| 3 | 33 | 2222 | 1111 | 3333 | repeat 3; rotate 3 to 1 | -| 3 | 0 | 2221 | 2222 | 1111 | insert resolved offset | -| 1 | 0 | 2222 | 2221 | 3333 | repeat 2 | +| 2 | 22 | 1111 | 3333 | 2222 | repeat 2: swap 1 & 2 | +| 3 | 33 | 2222 | 1111 | 3333 | repeat 3: rotate 3 to 1 | +| 3 | 0 | 2221 | 2222 | 1111 | special case : insert `repeat1 - 1` | +| 1 | 0 | 2222 | 2221 | 1111 | == repeat 2 | Skippable Frames @@ -967,14 +989,14 @@ Skippable frames defined in this specification are compatible with [LZ4] ones. -[LZ4]:http://www.lz4.org +[LZ4]:https://lz4.github.io/lz4/ From a compliant decoder perspective, skippable frames need just be skipped, and their content ignored, resuming decoding after the skippable frame. It can be noted that a skippable frame can be used to watermark a stream of concatenated frames -embedding any kind of tracking information (even just an UUID). +embedding any kind of tracking information (even just a UUID). Users wary of such possibility should scan the stream of concatenated frames in an attempt to detect such frame for analysis or removal. @@ -1183,7 +1205,7 @@ -------------- Zstandard Huffman-coded streams are read backwards, similar to the FSE bitstreams. -Therefore, to find the start of the bitstream, it is therefore to +Therefore, to find the start of the bitstream, it is required to know the offset of the last byte of the Huffman-coded stream. After writing the last bit containing information, the compressor @@ -1225,9 +1247,15 @@ ``` Number_of_Bits = Weight ? (Max_Number_of_Bits + 1 - Weight) : 0 ``` -The last symbol's `Weight` is deduced from previously decoded ones, -by completing to the nearest power of 2. -This power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. +When a literal value is not present, it receives a `Weight` of 0. +The least frequent symbol receives a `Weight` of 1. +Consequently, the `Weight` 1 is necessarily present. +The most frequent symbol receives a `Weight` anywhere between 1 and 11 (max). 
+The last symbol's `Weight` is deduced from previously retrieved Weights, +by completing to the nearest power of 2. It's necessarily non 0. +If it's not possible to reach a clean power of 2 with a single `Weight` value, +the Huffman Tree Description is considered invalid. +This final power of 2 gives `Max_Number_of_Bits`, the depth of the current tree. `Max_Number_of_Bits` must be <= 11, otherwise the representation is considered corrupted. @@ -1240,7 +1268,7 @@ The tree depth is 4, since its longest elements uses 4 bits (longest elements are the one with smallest frequency). -Value `5` will not be listed, as it can be determined from values for 0-4, +Literal value `5` will not be listed, as it can be determined from previous values 0-4, nor will values above `5` as they are all 0. Values from `0` to `4` will be listed using `Weight` instead of `Number_of_Bits`. Weight formula is : @@ -1260,7 +1288,7 @@ The sum of `2^(Weight-1)` (excluding 0's) is : `8 + 4 + 2 + 0 + 1 = 15`. Nearest larger power of 2 value is 16. -Therefore, `Max_Number_of_Bits = 4` and `Weight[5] = 16-15 = 1`. +Therefore, `Max_Number_of_Bits = 4` and `Weight[5] = log_2(16 - 15) + 1 = 1`. #### Huffman Tree header @@ -1669,6 +1697,8 @@ Version changes --------------- +- 0.3.9 : clarifications for Huffman-compressed literal sizes. +- 0.3.8 : clarifications for Huffman Blocks and Huffman Tree descriptions. - 0.3.7 : clarifications for Repeat_Offsets, matching RFC8878 - 0.3.6 : clarifications for Dictionary_ID - 0.3.5 : clarifications for Block_Maximum_Size diff --git a/sys/contrib/zstd/doc/zstd_manual.html b/sys/contrib/zstd/doc/zstd_manual.html --- a/sys/contrib/zstd/doc/zstd_manual.html +++ b/sys/contrib/zstd/doc/zstd_manual.html @@ -1,10 +1,10 @@ -zstd 1.5.2 Manual +zstd 1.5.5 Manual -

zstd 1.5.2 Manual

+

zstd 1.5.5 Manual


Contents

    @@ -22,15 +22,15 @@
  1. Dictionary helper functions
  2. Advanced dictionary and prefix API (Requires v1.4.0+)
  3. experimental API (static linking only)
  4. -
  5. Frame size functions
  6. +
  7. Frame header and size functions
  8. Memory management
  9. Advanced compression functions
  10. Advanced decompression functions
  11. Advanced streaming functions
  12. -
  13. Buffer-less and synchronous inner streaming functions
  14. +
  15. Buffer-less and synchronous inner streaming functions (DEPRECATED)
  16. Buffer-less streaming compression (synchronous mode)
  17. Buffer-less streaming decompression (synchronous mode)
  18. -
  19. Block level API
  20. +
  21. Block level API (DEPRECATED)

Introduction

@@ -80,7 +80,8 @@
                 const void* src, size_t srcSize,
                       int compressionLevel);
 

Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + enough space to successfully compress the data. @return : compressed size written into `dst` (<= `dstCapacity), or an error code if it fails (which can be tested using ZSTD_isError()).


@@ -121,7 +122,9 @@ note 6 : This function replaces ZSTD_getDecompressedSize()


-
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
+ZSTDLIB_API
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 

NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). Both functions work the same way, but ZSTD_getDecompressedSize() blends "empty", "unknown" and "error" results to the same return value (0), @@ -137,8 +140,30 @@ or an error code if input is invalid


-

Helper functions

#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
-size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+

Helper functions

/* ZSTD_compressBound() :
+ * maximum compressed size in worst case single-pass scenario.
+ * When invoking `ZSTD_compress()` or any other one-pass compression function,
+ * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
+ * as it eliminates one potential failure scenario,
+ * aka not enough room in dst buffer to write the compressed frame.
+ * Note : ZSTD_compressBound() itself can fail if @srcSize > ZSTD_MAX_INPUT_SIZE,
+ *        in which case ZSTD_compressBound() will return an error code
+ *        which can be tested using ZSTD_isError().
+ *
+ * ZSTD_COMPRESSBOUND() :
+ * same as ZSTD_compressBound(), but as a macro.
+ * It can be used to produce constants, which can be useful for static allocation,
+ * for example to size a static array on the stack.
+ * It will produce the constant value 0 if srcSize is too large.
+ */
+#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U)
+#define ZSTD_COMPRESSBOUND(srcSize)   (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+/* ZSTD_isError() :
+ * Most ZSTD_* functions returning a size_t value can be tested for error,
+ * using ZSTD_isError().
+ * @return 1 if error, 0 otherwise
+ */
 unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
 const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
 int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed, requires v1.4.0+ */
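As a minimal illustration of the ZSTD_compressBound()/ZSTD_isError() contract documented above, the sketch below sizes the destination buffer from the bound before a one-pass compression; the helper name and the level-3 choice are illustrative, not part of the library.

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Hypothetical helper: compress `src` into a freshly allocated buffer.
 * Sizing dst with ZSTD_compressBound() removes the "dst too small" failure mode. */
static void* compress_alloc(const void* src, size_t srcSize, size_t* cSizePtr)
{
    size_t const bound = ZSTD_compressBound(srcSize);
    if (ZSTD_isError(bound)) return NULL;        /* srcSize > ZSTD_MAX_INPUT_SIZE */
    {   void* const dst = malloc(bound);
        if (dst == NULL) return NULL;
        {   size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3 /* level */);
            if (ZSTD_isError(cSize)) {
                fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
                free(dst);
                return NULL;
            }
            *cSizePtr = cSize;
            return dst;
        }
    }
}
```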
@@ -359,6 +384,9 @@
      * ZSTD_c_validateSequences
      * ZSTD_c_useBlockSplitter
      * ZSTD_c_useRowMatchFinder
+     * ZSTD_c_prefetchCDictTables
+     * ZSTD_c_enableSeqProducerFallback
+     * ZSTD_c_maxBlockSize
      * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
      * note : never ever use experimentalParam? names directly;
      *        also, the enums values themselves are unstable and can still change.
@@ -377,7 +405,11 @@
      ZSTD_c_experimentalParam12=1009,
      ZSTD_c_experimentalParam13=1010,
      ZSTD_c_experimentalParam14=1011,
-     ZSTD_c_experimentalParam15=1012
+     ZSTD_c_experimentalParam15=1012,
+     ZSTD_c_experimentalParam16=1013,
+     ZSTD_c_experimentalParam17=1014,
+     ZSTD_c_experimentalParam18=1015,
+     ZSTD_c_experimentalParam19=1016
 } ZSTD_cParameter;
 

typedef struct {
@@ -440,7 +472,7 @@
                   They will be used to compress next frame.
                   Resetting session never fails.
   - The parameters : changes all parameters back to "default".
-                  This removes any reference to any dictionary too.
+                  This also removes any reference to any dictionary or external sequence producer.
                   Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
                   otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
   - Both : similar to resetting the session, followed by resetting parameters.
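A small sketch of the three reset directives described above, assuming a valid `ZSTD_CCtx*`; the function name is illustrative only.

```c
#include <zstd.h>

static void reset_examples(ZSTD_CCtx* cctx)
{
    /* keep current parameters (and dictionary), just abandon the ongoing frame */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);

    /* return all parameters to default; this also drops any dictionary reference */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);

    /* both of the above at once */
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
}
```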
@@ -455,7 +487,8 @@
   Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
   - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
   - The function is always blocking, returns when compression is completed.
-  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
+  NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+        enough space to successfully compress the data, though it is possible it fails for other reasons.
  @return : compressed size written into `dst` (<= `dstCapacity`),
            or an error code if it fails (which can be tested using ZSTD_isError()).
  
@@ -479,13 +512,15 @@
      * ZSTD_d_stableOutBuffer
      * ZSTD_d_forceIgnoreChecksum
      * ZSTD_d_refMultipleDDicts
+     * ZSTD_d_disableHuffmanAssembly
      * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
      * note : never ever use experimentalParam? names directly
      */
      ZSTD_d_experimentalParam1=1000,
      ZSTD_d_experimentalParam2=1001,
      ZSTD_d_experimentalParam3=1002,
-     ZSTD_d_experimentalParam4=1003
+     ZSTD_d_experimentalParam4=1003,
+     ZSTD_d_experimentalParam5=1004
 
 } ZSTD_dParameter;
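For illustration, stable decompression parameters are set the same way as the experimental ones listed above; the sketch below assumes a valid `ZSTD_DCtx*` and uses the stable `ZSTD_d_windowLogMax` parameter (the experimental ones additionally require defining `ZSTD_STATIC_LINKING_ONLY`).

```c
#include <zstd.h>

/* Refuse frames that would require a decoding window larger than 2^27 (128 MB). */
static size_t limit_window(ZSTD_DCtx* dctx)
{
    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
}
```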
 

@@ -654,6 +689,9 @@ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + + Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + to compress with a dictionary.


@@ -689,6 +727,33 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds);
/* accept NULL pointer */

Streaming decompression functions


+
size_t ZSTD_initDStream(ZSTD_DStream* zds);
+

Initialize/reset DStream state for new decompression operation. + Call before new decompression operation using same DStream. + + Note : This function is redundant with the advanced API and equivalent to: + ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); + ZSTD_DCtx_refDDict(zds, NULL); + +


+ +
size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+

Streaming decompression function. + Call repetitively to consume full input updating it as necessary. + Function will update both input and output `pos` fields exposing current state via these fields: + - `input.pos < input.size`, some input remaining and caller should provide remaining input + on the next call. + - `output.pos < output.size`, decoder finished and flushed all remaining buffers. + - `output.pos == output.size`, potentially uncflushed data present in the internal buffers, + call ZSTD_decompressStream() again to flush remaining data to output. + Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + + @return : 0 when a frame is completely decoded and fully flushed, + or an error code, which can be tested using ZSTD_isError(), + or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + +


+
size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
 

size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
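A minimal sketch of the ZSTD_decompressStream() loop described above, using the recommended buffer sizes; the file-based I/O and the abbreviated error handling are illustrative assumptions.

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int decompress_stream(FILE* fin, FILE* fout)
{
    size_t const inSize  = ZSTD_DStreamInSize();
    size_t const outSize = ZSTD_DStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_DStream* const dstream = ZSTD_createDStream();
    size_t lastRet = 0;
    if (!inBuf || !outBuf || !dstream) return -1;       /* cleanup omitted */

    for (;;) {
        size_t const readSize = fread(inBuf, 1, inSize, fin);
        if (readSize == 0) break;                       /* end of input */
        {   ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {            /* consume the whole chunk */
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const ret = ZSTD_decompressStream(dstream, &output, &input);
                if (ZSTD_isError(ret)) return -1;       /* cleanup omitted */
                fwrite(outBuf, 1, output.pos, fout);
                lastRet = ret;
            }
        }
    }
    ZSTD_freeDStream(dstream);
    free(inBuf); free(outBuf);
    return (lastRet == 0) ? 0 : -1;   /* non-zero means the last frame is truncated */
}
```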
@@ -794,7 +859,7 @@
   If @return == 0, the dictID could not be decoded.
   This could for one of the following reasons :
   - The frame does not require a dictionary to be decoded (most common case).
-  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
+  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.
     Note : this use case also happens when using a non-conformant dictionary.
   - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
   - This is not a Zstandard frame.
@@ -803,9 +868,11 @@
 
 

Advanced dictionary and prefix API (Requires v1.4.0+)

  This API allows dictionaries to be used with ZSTD_compress2(),
- ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
- only reset with the context is reset with ZSTD_reset_parameters or
- ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ZSTD_compressStream2(), and ZSTD_decompressDCtx().
+ Dictionaries are sticky: they remain valid when the same context is re-used,
+ and they only reset when the context is reset
+ with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
+ In contrast, Prefixes are single-use.
 
size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
@@ -814,8 +881,9 @@
  @result : 0, or an error code (which can be tested with ZSTD_isError()).
   Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
            meaning "return to no-dictionary mode".
-  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
-           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+  Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
+           until parameters are reset, a new dictionary is loaded, or the dictionary
+           is explicitly invalidated by loading a NULL dictionary.
   Note 2 : Loading a dictionary involves building tables.
            It's also a CPU consuming operation, with non-negligible impact on latency.
            Tables are dependent on compression parameters, and for this reason,
@@ -824,11 +892,15 @@
            Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
            In such a case, dictionary buffer must outlive its users.
   Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
-           to precisely select how dictionary content must be interpreted. 
+           to precisely select how dictionary content must be interpreted.
+  Note 5 : This method does not benefit from LDM (long distance mode).
+           If you want to employ LDM on some large dictionary content,
+           prefer employing ZSTD_CCtx_refPrefix() described below.
+ 
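A brief sketch of the sticky-dictionary behaviour described in the notes above; it assumes valid contexts and an in-memory dictionary blob, and the helper name is illustrative.

```c
#include <zstd.h>

/* The loaded dictionary applies to every following frame until it is replaced,
 * cleared with a NULL dictionary, or parameters are reset. */
static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize)
{
    size_t const r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    if (ZSTD_isError(r)) return r;
    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}
```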
 


size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-

Reference a prepared dictionary, to be used for all next compressed frames. +

Reference a prepared dictionary, to be used for all future compressed frames. Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx. The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. @@ -848,6 +920,7 @@ Decompression will need same prefix to properly regenerate data. Compressing with a prefix is similar in outcome as performing a diff and compressing it, but performs much faster, especially during decompression (compression speed is tunable with compression level). + This method is compatible with LDM (long distance mode). @result : 0, or an error code (which can be tested with ZSTD_isError()). Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary Note 1 : Prefix buffer is referenced. It **must** outlive compression. @@ -863,9 +936,9 @@


size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
-

Create an internal DDict from dict buffer, - to be used to decompress next frames. - The dictionary remains valid for all future frames, until explicitly invalidated. +

Create an internal DDict from dict buffer, to be used to decompress all future frames. + The dictionary remains valid for all future frames, until explicitly invalidated, or + a new dictionary is loaded. @result : 0, or an error code (which can be tested with ZSTD_isError()). Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, meaning "return to no-dictionary mode". @@ -889,9 +962,10 @@ The memory for the table is allocated on the first call to refDDict, and can be freed with ZSTD_freeDCtx(). + If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + will be managed, and referencing a dictionary effectively "discards" any previous one. + @result : 0, or an error code (which can be tested with ZSTD_isError()). - Note 1 : Currently, only one dictionary can be managed. - Referencing a new dictionary effectively "discards" any previous one. Special: referencing a NULL DDict means "return to no-dictionary mode". Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. @@ -1081,7 +1155,7 @@ ZSTD_ps_disable = 2 /* Do not use the feature */ } ZSTD_paramSwitch_e;


-

Frame size functions


+

Frame header and size functions


 
 
ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 

`src` should point to the start of a series of ZSTD encoded and/or skippable frames @@ -1127,20 +1201,99 @@ or an error code (if srcSize is too small)


+
typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+

+
typedef struct {
+    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
+    unsigned blockSizeMax;
+    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+    unsigned headerSize;
+    unsigned dictID;
+    unsigned checksumFlag;
+    unsigned _reserved1;
+    unsigned _reserved2;
+} ZSTD_frameHeader;
+

+
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+/*! ZSTD_getFrameHeader_advanced() :
+ *  same as ZSTD_getFrameHeader(),
+ *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
+

decode Frame Header, or requires larger `srcSize`. + @return : 0, `zfhPtr` is correctly filled, + >0, `srcSize` is too small, value is wanted `srcSize` amount, + or an error code, which can be tested using ZSTD_isError() +


+ +
ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
+

Zstd supports in-place decompression, where the input and output buffers overlap. + In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + and the input buffer must be at the end of the output buffer. + + _______________________ Output Buffer ________________________ + | | + | ____ Input Buffer ____| + | | | + v v v + |---------------------------------------|-----------|----------| + ^ ^ ^ + |___________________ Output_Size ___________________|_ Margin _| + + NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + ZSTD_decompressDCtx(). + NOTE: This function supports multi-frame input. + + @param src The compressed frame(s) + @param srcSize The size of the compressed frame(s) + @returns The decompression margin or an error that can be checked with ZSTD_isError(). + +


+ +
#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)(                                              \
+        ZSTD_FRAMEHEADERSIZE_MAX                                                              /* Frame header */ + \
+        4                                                                                         /* checksum */ + \
+        ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
+        (blockSize)                                                                    /* One block of margin */   \
+    ))
+

Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + the compressed frame, compute it from the original size and the blockSizeLog. + See ZSTD_decompressionMargin() for details. + + WARNING: This macro does not support multi-frame input, the input must be a single + zstd frame. If you need that support use the function, or implement it yourself. + + @param originalSize The original uncompressed size of the data. + @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). + Unless you explicitly set the windowLog smaller than + ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + +


+
typedef enum {
   ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
   ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
 } ZSTD_sequenceFormat_e;
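Returning to ZSTD_decompressionMargin() / ZSTD_DECOMPRESSION_MARGIN() described just above, a minimal in-place decompression sketch might look as follows; it assumes a single frame, a caller-known original size, and illustrative names.

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressionMargin() is a static-linking API */
#include <assert.h>
#include <string.h>
#include <zstd.h>

/* The compressed frame is moved to the end of an output buffer sized
 * originalSize + margin, then decompressed over itself. */
static size_t decompress_in_place(void* buf, size_t bufCapacity,
                                  const void* cSrc, size_t cSize, size_t originalSize)
{
    size_t const margin = ZSTD_decompressionMargin(cSrc, cSize);
    if (ZSTD_isError(margin)) return margin;
    assert(bufCapacity >= originalSize + margin);
    memmove((char*)buf + originalSize + margin - cSize, cSrc, cSize);  /* input at the very end */
    return ZSTD_decompress(buf, originalSize,
                           (char*)buf + originalSize + margin - cSize, cSize);
}
```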
 

-

Generate sequences using ZSTD_compress2, given a source buffer. +

ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
+

`srcSize` : size of the input buffer + @return : upper-bound for the number of sequences that can be generated + from a buffer of srcSize bytes + + note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + +


+ +

Generate sequences using ZSTD_compress2(), given a source buffer. Each block will end with a dummy sequence with offset == 0, matchLength == 0, and litLength == length of last literals. litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) simply acts as a block delimiter. - zc can be used to insert custom compression params. - This function invokes ZSTD_compress2 + @zc can be used to insert custom compression params. + This function invokes ZSTD_compress2(). The output of this function can be fed into ZSTD_compressSequences() with CCtx setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters @@ -1150,7 +1303,7 @@

ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
 

Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - by merging them into into the literals of the next sequence. + by merging them into the literals of the next sequence. As such, the final generated result has no explicit representation of block boundaries, and the final last literals segment is not represented in the sequences. @@ -1161,10 +1314,13 @@


-
ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
-                      const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
-                      const void* src, size_t srcSize);
-

Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. +

ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
+            const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+            const void* src, size_t srcSize);
+

Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + @src contains the entire input (not just the literals). + If @srcSize > sum(sequence.length), the remaining bytes are considered all literals If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) The entire source is compressed into a single frame. @@ -1189,7 +1345,7 @@ Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, and cannot emit an RLE block that disagrees with the repcode history - @return : final compressed size or a ZSTD error. + @return : final compressed size, or a ZSTD error code.


@@ -1197,7 +1353,7 @@ const void* src, size_t srcSize, unsigned magicVariant);

Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. - Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number, + Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. @@ -1253,8 +1409,11 @@ and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. - Note 2 : only single-threaded compression is supported. + Note : only single-threaded compression is supported. ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + + Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + Size estimates assume that no external sequence producer is registered.


@@ -1274,7 +1433,12 @@ or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); Note : if streaming is init with function ZSTD_init?Stream_usingDict(), an internal ?Dict will be created, which additional size is not estimated here. - In this case, get total size by adding ZSTD_estimate?DictSize + In this case, get total size by adding ZSTD_estimate?DictSize + Note 2 : only single-threaded compression is supported. + ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + Size estimates assume that no external sequence producer is registered. +


ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
@@ -1374,18 +1538,41 @@
   This function never fails (wide contract) 
 


+
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
+

Set all parameters provided within @p cparams into the working @p cctx. + Note : if modifying parameters during compression (MT mode only), + note that changes to the .windowLog parameter will be ignored. + @return 0 on success, or an error code (can be checked with ZSTD_isError()). + On failure, no parameters are updated. + +


+ +
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
+

Set all parameters provided within @p fparams into the working @p cctx. + @return 0 on success, or an error code (can be checked with ZSTD_isError()). + +


+ +
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
+

Set all parameters provided within @p params into the working @p cctx. + @return 0 on success, or an error code (can be checked with ZSTD_isError()). + +


+
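A short sketch of ZSTD_CCtx_setCParams() described above, deriving the parameter set with the static-API helper ZSTD_getCParams(); the wrapper name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getCParams() / ZSTD_CCtx_setCParams() are static-API */
#include <zstd.h>

static size_t apply_cparams(ZSTD_CCtx* cctx, int level, unsigned long long srcSizeHint)
{
    ZSTD_compressionParameters const cparams = ZSTD_getCParams(level, srcSizeHint, 0 /* no dict */);
    return ZSTD_CCtx_setCParams(cctx, cparams);   /* 0 on success, error code otherwise */
}
```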
ZSTD_DEPRECATED("use ZSTD_compress2")
+ZSTDLIB_STATIC_API
 size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
-                              void* dst, size_t dstCapacity,
-                        const void* src, size_t srcSize,
-                        const void* dict,size_t dictSize,
-                              ZSTD_parameters params);
+                  void* dst, size_t dstCapacity,
+            const void* src, size_t srcSize,
+            const void* dict,size_t dictSize,
+                  ZSTD_parameters params);
 

Note : this function is now DEPRECATED. It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. This prototype will generate compilation warnings.


ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+ZSTDLIB_STATIC_API
 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
@@ -1547,6 +1734,7 @@
 


ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+ZSTDLIB_STATIC_API
 size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
 

This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). Instruct the decoder context about what kind of data to decode next. @@ -1573,6 +1761,7 @@

Advanced Streaming compression functions


ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
              int compressionLevel,
              unsigned long long pledgedSrcSize);
@@ -1590,6 +1779,7 @@
 


ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
          const void* dict, size_t dictSize,
                int compressionLevel);
@@ -1607,16 +1797,14 @@
 


ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
         const void* dict, size_t dictSize,
               ZSTD_parameters params,
               unsigned long long pledgedSrcSize);
-

This function is DEPRECATED, and is approximately equivalent to: +

This function is DEPRECATED, and is equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - // Pseudocode: Set each zstd parameter and leave the rest as-is. - for ((param, value) : params) { - ZSTD_CCtx_setParameter(zcs, param, value); - } + ZSTD_CCtx_setParams(zcs, params); ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); @@ -1628,6 +1816,7 @@


ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
 

This function is DEPRECATED, and equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); @@ -1639,16 +1828,14 @@


ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                    const ZSTD_CDict* cdict,
                          ZSTD_frameParameters fParams,
                          unsigned long long pledgedSrcSize);
-

This function is DEPRECATED, and is approximately equivalent to: +

This function is DEPRECATED, and is equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - // Pseudocode: Set each zstd frame parameter and leave the rest as-is. - for ((fParam, value) : fParams) { - ZSTD_CCtx_setParameter(zcs, fParam, value); - } + ZSTD_CCtx_setFParams(zcs, fParams); ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); ZSTD_CCtx_refCDict(zcs, cdict); @@ -1660,6 +1847,7 @@


ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
 

This function is DEPRECATED, and is equivalent to: ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); @@ -1705,39 +1893,71 @@


Advanced Streaming decompression functions


-
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
 

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); ZSTD_DCtx_loadDictionary(zds, dict, dictSize); note: no dictionary will be used if dict == NULL or dictSize < 8 - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


-
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
 

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); ZSTD_DCtx_refDDict(zds, ddict); note : ddict is referenced, it must outlive decompression session - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


-
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+
ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
 

ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); re-use decompression parameters from previous init; saves dictionary loading - Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x


-

Buffer-less and synchronous inner streaming functions

-  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
-  But it's also a complex one, with several restrictions, documented below.
-  Prefer normal streaming API for an easier experience.
+
ZSTDLIB_STATIC_API void
+ZSTD_registerSequenceProducer(
+  ZSTD_CCtx* cctx,
+  void* sequenceProducerState,
+  ZSTD_sequenceProducer_F* sequenceProducer
+);
+

Instruct zstd to use a block-level external sequence producer function. + + The sequenceProducerState must be initialized by the caller, and the caller is + responsible for managing its lifetime. This parameter is sticky across + compressions. It will remain set until the user explicitly resets compression + parameters. + + Sequence producer registration is considered to be an "advanced parameter", + part of the "advanced API". This means it will only have an effect on compression + APIs which respect advanced parameters, such as compress2() and compressStream2(). + Older compression APIs such as compressCCtx(), which predate the introduction of + "advanced parameters", will ignore any external sequence producer setting. + + The sequence producer can be "cleared" by registering a NULL function pointer. This + removes all limitations described above in the "LIMITATIONS" section of the API docs. + + The user is strongly encouraged to read the full API documentation (above) before + calling this function. +


+ +
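A heavily simplified registration sketch for the sequence producer API described above. The callback parameter list and the ZSTD_SEQUENCE_PRODUCER_ERROR constant are taken from zstd.h (v1.5.4+) and are not shown in this excerpt, so treat them as assumptions; the toy producer below always declines, relying on ZSTD_c_enableSeqProducerFallback.

```c
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Toy producer: never emits sequences, so zstd falls back to its internal
 * match finder when ZSTD_c_enableSeqProducerFallback is enabled. */
static size_t noopSeqProducer(void* state,
                              ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                              const void* src, size_t srcSize,
                              const void* dict, size_t dictSize,
                              int compressionLevel, size_t windowSize)
{
    (void)state; (void)outSeqs; (void)outSeqsCapacity; (void)src; (void)srcSize;
    (void)dict; (void)dictSize; (void)compressionLevel; (void)windowSize;
    return ZSTD_SEQUENCE_PRODUCER_ERROR;   /* "no sequences produced" */
}

static void register_producer(ZSTD_CCtx* cctx)
{
    ZSTD_registerSequenceProducer(cctx, NULL /* state */, noopSeqProducer);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
    /* ... compress with ZSTD_compress2() / ZSTD_compressStream2() ... */
    ZSTD_registerSequenceProducer(cctx, NULL, NULL);   /* clear the producer */
}
```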

Buffer-less and synchronous inner streaming functions (DEPRECATED)

+  This API is deprecated, and will be removed in a future version.
+  It allows streaming (de)compression with user allocated buffers.
+  However, it is hard to use, and not as well tested as the rest of
+  our API.
+
+  Please use the normal streaming API instead: ZSTD_compressStream2,
+  and ZSTD_decompressStream.
+  If it lacks functionality that you need,
+  please open an issue on our GitHub.
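Since the note above points to the normal streaming API as the replacement, here is a minimal ZSTD_compressStream2() loop for reference; the file handles and abbreviated error handling are illustrative assumptions.

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int compress_stream(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZSTD_CStreamInSize();
    size_t const outSize = ZSTD_CStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (!inBuf || !outBuf || !cctx) return -1;    /* cleanup omitted */

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    for (;;) {
        size_t const readSize = fread(inBuf, 1, inSize, fin);
        int const lastChunk = (readSize < inSize);
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        int finished;
        do {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) return -1;            /* cleanup omitted */
            fwrite(outBuf, 1, output.pos, fout);
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        } while (!finished);
        if (lastChunk) break;
    }
    ZSTD_freeCCtx(cctx);
    free(inBuf); free(outBuf);
    return 0;
}
```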
  
 
@@ -1748,7 +1968,6 @@ Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : @@ -1769,11 +1988,15 @@ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
-

Buffer-less streaming compression functions

ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+

Buffer-less streaming compression functions

ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
-ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
 

+
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+

size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
 

Buffer-less streaming decompression (synchronous mode)

@@ -1785,8 +2008,8 @@
   Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
   Data fragment must be large enough to ensure successful decoding.
  `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
-  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
-           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+  result  : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+           >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
            errorCode, which can be tested using ZSTD_isError().
 
   It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
@@ -1805,7 +2028,7 @@
 
   The most memory efficient way is to use a round buffer of sufficient size.
   Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
-  which can @return an error code if required value is too large for current system (in 32-bits mode).
+  which can return an error code if the required value is too large for the current system (in 32-bit mode).
   In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
   up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
@@ -1825,7 +2048,7 @@
   ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
   ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
 
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+  result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
   It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
   It can also be an error code, which can be tested with ZSTD_isError().
 
@@ -1847,34 +2070,25 @@
   For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
 
-

Buffer-less streaming decompression functions

typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
-typedef struct {
-    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
-    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
-    unsigned blockSizeMax;
-    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
-    unsigned headerSize;
-    unsigned dictID;
-    unsigned checksumFlag;
-} ZSTD_frameHeader;
-

-
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
-/*! ZSTD_getFrameHeader_advanced() :
- *  same as ZSTD_getFrameHeader(),
- *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
-ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
-

decode Frame Header, or requires larger `srcSize`. - @return : 0, `zfhPtr` is correctly filled, - >0, `srcSize` is too small, value is wanted `srcSize` amount, - or an error code, which can be tested using ZSTD_isError() -


- +

Buffer-less streaming decompression functions


+
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+

typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
 

-

Block level API


+

Block level API (DEPRECATED)


+
+

You can get the frame header down to 2 bytes by setting: + - ZSTD_c_format = ZSTD_f_zstd1_magicless + - ZSTD_c_contentSizeFlag = 0 + - ZSTD_c_checksumFlag = 0 + - ZSTD_c_dictIDFlag = 0 + + This API is not as well tested as our normal API, so we recommend not using it. + We will be removing it in a future version. If the normal API doesn't provide + the functionality you need, please open a GitHub issue. -

Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). + Block functions produce and decode raw zstd blocks, without frame metadata. + Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. A few rules to respect : @@ -1883,7 +2097,6 @@ - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. @@ -1899,9 +2112,13 @@ Use ZSTD_insertBlock() for such a case.


-

Raw zstd block functions

ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
+

Raw zstd block functions

ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
+ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
 ZSTDLIB_STATIC_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
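The "frame header down to 2 bytes" configuration mentioned above can be expressed as follows; note that ZSTD_c_format and ZSTD_d_format are experimental parameters, so this sketch assumes static linking, and the function name is illustrative.

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_format / ZSTD_d_format are experimental parameters */
#include <zstd.h>

/* Minimal framing instead of the deprecated raw block API (assumes valid cctx/dctx). */
static void configure_minimal_framing(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);

    /* the decoder must be told to expect magicless frames as well */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
}
```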
 

diff --git a/sys/contrib/zstd/examples/Makefile b/sys/contrib/zstd/examples/Makefile --- a/sys/contrib/zstd/examples/Makefile +++ b/sys/contrib/zstd/examples/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/common.h b/sys/contrib/zstd/examples/common.h --- a/sys/contrib/zstd/examples/common.h +++ b/sys/contrib/zstd/examples/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,6 +21,17 @@ #include // stat #include + +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +#if defined(__GNUC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + +#define HEADER_FUNCTION static UNUSED_ATTR + + /* * Define the returned error code from utility functions. */ @@ -68,7 +79,7 @@ * * @return The size of a given file path. */ -static size_t fsize_orDie(const char *filename) +HEADER_FUNCTION size_t fsize_orDie(const char *filename) { struct stat st; if (stat(filename, &st) != 0) { @@ -96,7 +107,7 @@ * @return If successful this function will return a FILE pointer to an * opened file otherwise it sends an error to stderr and exits. */ -static FILE* fopen_orDie(const char *filename, const char *instruction) +HEADER_FUNCTION FILE* fopen_orDie(const char *filename, const char *instruction) { FILE* const inFile = fopen(filename, instruction); if (inFile) return inFile; @@ -108,7 +119,7 @@ /*! fclose_orDie() : * Close an opened file using given FILE pointer. */ -static void fclose_orDie(FILE* file) +HEADER_FUNCTION void fclose_orDie(FILE* file) { if (!fclose(file)) { return; }; /* error */ @@ -123,7 +134,7 @@ * * @return The number of bytes read. */ -static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +HEADER_FUNCTION size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) { size_t const readSize = fread(buffer, 1, sizeToRead, file); if (readSize == sizeToRead) return readSize; /* good */ @@ -143,7 +154,7 @@ * * @return The number of bytes written. */ -static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +HEADER_FUNCTION size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) { size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ @@ -159,7 +170,7 @@ * cated memory. If there is an error, this function will send that * error to stderr and exit. */ -static void* malloc_orDie(size_t size) +HEADER_FUNCTION void* malloc_orDie(size_t size) { void* const buff = malloc(size); if (buff) return buff; @@ -177,7 +188,7 @@ * @return If successful this function will load file into buffer and * return file size, otherwise it will printout an error to stderr and exit. 
*/ -static size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) +HEADER_FUNCTION size_t loadFile_orDie(const char* fileName, void* buffer, size_t bufferSize) { size_t const fileSize = fsize_orDie(fileName); CHECK(fileSize <= bufferSize, "File too large!"); @@ -201,7 +212,8 @@ * @return If successful this function will return buffer and bufferSize(=fileSize), * otherwise it will printout an error to stderr and exit. */ -static void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) { +HEADER_FUNCTION void* mallocAndLoadFile_orDie(const char* fileName, size_t* bufferSize) +{ size_t const fileSize = fsize_orDie(fileName); *bufferSize = fileSize; void* const buffer = malloc_orDie(*bufferSize); @@ -217,7 +229,7 @@ * Note: This function will send an error to stderr and exit if it * cannot write to a given file. */ -static void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) +HEADER_FUNCTION void saveFile_orDie(const char* fileName, const void* buff, size_t buffSize) { FILE* const oFile = fopen_orDie(fileName, "wb"); size_t const wSize = fwrite(buff, 1, buffSize, oFile); diff --git a/sys/contrib/zstd/examples/dictionary_compression.c b/sys/contrib/zstd/examples/dictionary_compression.c --- a/sys/contrib/zstd/examples/dictionary_compression.c +++ b/sys/contrib/zstd/examples/dictionary_compression.c @@ -1,12 +1,22 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. - */ +**/ + +/* This example deals with Dictionary compression, + * its counterpart is `examples/dictionary_decompression.c` . + * These examples presume that a dictionary already exists. + * The main method to create a dictionary is `zstd --train`, + * look at the CLI documentation for details. + * Another possible method is to employ dictionary training API, + * published in `lib/zdict.h` . +**/ + #include // printf #include // free #include // memset, strcat @@ -14,7 +24,7 @@ #include "common.h" // Helper functions, CHECK(), and CHECK_ZSTD() /* createDict() : - `dictFileName` is supposed to have been created using `zstd --train` */ +** `dictFileName` is supposed already created using `zstd --train` */ static ZSTD_CDict* createCDict_orDie(const char* dictFileName, int cLevel) { size_t dictSize; diff --git a/sys/contrib/zstd/examples/dictionary_decompression.c b/sys/contrib/zstd/examples/dictionary_decompression.c --- a/sys/contrib/zstd/examples/dictionary_decompression.c +++ b/sys/contrib/zstd/examples/dictionary_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/multiple_simple_compression.c b/sys/contrib/zstd/examples/multiple_simple_compression.c --- a/sys/contrib/zstd/examples/multiple_simple_compression.c +++ b/sys/contrib/zstd/examples/multiple_simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/multiple_streaming_compression.c b/sys/contrib/zstd/examples/multiple_streaming_compression.c --- a/sys/contrib/zstd/examples/multiple_streaming_compression.c +++ b/sys/contrib/zstd/examples/multiple_streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/simple_compression.c b/sys/contrib/zstd/examples/simple_compression.c --- a/sys/contrib/zstd/examples/simple_compression.c +++ b/sys/contrib/zstd/examples/simple_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/simple_decompression.c b/sys/contrib/zstd/examples/simple_decompression.c --- a/sys/contrib/zstd/examples/simple_decompression.c +++ b/sys/contrib/zstd/examples/simple_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/streaming_compression.c b/sys/contrib/zstd/examples/streaming_compression.c --- a/sys/contrib/zstd/examples/streaming_compression.c +++ b/sys/contrib/zstd/examples/streaming_compression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/streaming_compression_thread_pool.c b/sys/contrib/zstd/examples/streaming_compression_thread_pool.c --- a/sys/contrib/zstd/examples/streaming_compression_thread_pool.c +++ b/sys/contrib/zstd/examples/streaming_compression_thread_pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Martin Liska, SUSE, Facebook, Inc. + * Copyright (c) Martin Liska, SUSE, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/streaming_decompression.c b/sys/contrib/zstd/examples/streaming_decompression.c --- a/sys/contrib/zstd/examples/streaming_decompression.c +++ b/sys/contrib/zstd/examples/streaming_decompression.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/examples/streaming_memory_usage.c b/sys/contrib/zstd/examples/streaming_memory_usage.c --- a/sys/contrib/zstd/examples/streaming_memory_usage.c +++ b/sys/contrib/zstd/examples/streaming_memory_usage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/Makefile b/sys/contrib/zstd/lib/Makefile --- a/sys/contrib/zstd/lib/Makefile +++ b/sys/contrib/zstd/lib/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -298,7 +298,7 @@ libzstd.pc: libzstd.pc.in @echo creating pkgconfig - @sed $(SED_ERE_OPT) \ + @sed \ -e 's|@PREFIX@|$(PREFIX)|' \ -e 's|@EXEC_PREFIX@|$(PCEXEC_PREFIX)|' \ -e 's|@INCLUDEDIR@|$(PCINCPREFIX)$(PCINCDIR)|' \ diff --git a/sys/contrib/zstd/lib/README.md b/sys/contrib/zstd/lib/README.md --- a/sys/contrib/zstd/lib/README.md +++ b/sys/contrib/zstd/lib/README.md @@ -91,7 +91,7 @@ `ZSTD_LIB_COMPRESSION, ZSTD_LIB_DECOMPRESSION`, `ZSTD_LIB_DICTBUILDER`, and `ZSTD_LIB_DEPRECATED` as `0` to forgo compilation of the corresponding features. This will also disable compilation of all - dependencies (eg. `ZSTD_LIB_COMPRESSION=0` will also disable + dependencies (e.g. `ZSTD_LIB_COMPRESSION=0` will also disable dictBuilder). - There are a number of options that can help minimize the binary size of @@ -161,6 +161,13 @@ `ZSTD_DCtx` decompression contexts, but might also result in a small decompression speed cost. +- The C compiler macros `ZSTDLIB_VISIBLE`, `ZSTDERRORLIB_VISIBLE` and `ZDICTLIB_VISIBLE` + can be overridden to control the visibility of zstd's API. Additionally, + `ZSTDLIB_STATIC_API` and `ZDICTLIB_STATIC_API` can be overridden to control the visibility + of zstd's static API. Specifically, it can be set to `ZSTDLIB_HIDDEN` to hide the symbols + from the shared library. These macros default to `ZSTDLIB_VISIBILITY`, + `ZSTDERRORLIB_VSIBILITY`, and `ZDICTLIB_VISIBILITY` if unset, for backwards compatibility + with the old macro names. #### Windows : using MinGW+MSYS to create DLL diff --git a/sys/contrib/zstd/lib/common/allocations.h b/sys/contrib/zstd/lib/common/allocations.h new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/lib/common/allocations.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/* This file provides custom allocation primitives + */ + +#define ZSTD_DEPS_NEED_MALLOC +#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ + +#include "mem.h" /* MEM_STATIC */ +#define ZSTD_STATIC_LINKING_ONLY +#include "../zstd.h" /* ZSTD_customMem */ + +#ifndef ZSTD_ALLOCATIONS_H +#define ZSTD_ALLOCATIONS_H + +/* custom memory allocation functions */ + +MEM_STATIC void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); + return ZSTD_malloc(size); +} + +MEM_STATIC void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); + ZSTD_memset(ptr, 0, size); + return ptr; + } + return ZSTD_calloc(1, size); +} + +MEM_STATIC void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else + ZSTD_free(ptr); + } +} + +#endif /* ZSTD_ALLOCATIONS_H */ diff --git a/sys/contrib/zstd/lib/common/bits.h b/sys/contrib/zstd/lib/common/bits.h new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/lib/common/bits.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_BITS_H +#define ZSTD_BITS_H + +#include "mem.h" + +MEM_STATIC unsigned ZSTD_countTrailingZeros32_fallback(U32 val) +{ + assert(val != 0); + { + static const U32 DeBruijnBytePos[32] = {0, 1, 28, 2, 29, 14, 24, 3, + 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, + 26, 12, 18, 6, 11, 5, 10, 9}; + return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) +{ + assert(val != 0); +# if defined(_MSC_VER) +# if STATIC_BMI2 == 1 + return (unsigned)_tzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_ctz(val); +# else + return ZSTD_countTrailingZeros32_fallback(val); +# endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) { + assert(val != 0); + { + static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31}; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + return 31 - DeBruijnClz[(val * 0x07C4ACDDU) >> 27]; + } +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) +{ + assert(val != 0); +# if defined(_MSC_VER) +# if STATIC_BMI2 == 1 + return (unsigned)_lzcnt_u32(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)(31 - r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)__builtin_clz(val); +# else + return 
ZSTD_countLeadingZeros32_fallback(val); +# endif +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) +{ + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 == 1 + return (unsigned)_tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) && defined(__LP64__) + return (unsigned)__builtin_ctzll(val); +# else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); + } else { + return ZSTD_countTrailingZeros32(leastSignificantWord); + } + } +# endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) +{ + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 == 1 + return (unsigned)_lzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, val); + return (unsigned)(63 - r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (unsigned)(__builtin_clzll(val)); +# else + { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (mostSignificantWord == 0) { + return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); + } else { + return ZSTD_countLeadingZeros32(mostSignificantWord); + } + } +# endif +} + +MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { + return ZSTD_countTrailingZeros64((U64)val) >> 3; + } else { + return ZSTD_countTrailingZeros32((U32)val) >> 3; + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { + return ZSTD_countLeadingZeros64((U64)val) >> 3; + } else { + return ZSTD_countLeadingZeros32((U32)val) >> 3; + } + } +} + +MEM_STATIC unsigned ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ +{ + assert(val != 0); + return 31 - ZSTD_countLeadingZeros32(val); +} + +/* ZSTD_rotateRight_*(): + * Rotates a bitfield to the right by "count" bits. + * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts + */ +MEM_STATIC +U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { + assert(count < 64); + count &= 0x3F; /* for fickle pattern recognition */ + return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); +} + +MEM_STATIC +U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { + assert(count < 32); + count &= 0x1F; /* for fickle pattern recognition */ + return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); +} + +MEM_STATIC +U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { + assert(count < 16); + count &= 0x0F; /* for fickle pattern recognition */ + return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); +} + +#endif /* ZSTD_BITS_H */ diff --git a/sys/contrib/zstd/lib/common/bitstream.h b/sys/contrib/zstd/lib/common/bitstream.h --- a/sys/contrib/zstd/lib/common/bitstream.h +++ b/sys/contrib/zstd/lib/common/bitstream.h @@ -1,7 +1,7 @@ /* ****************************************************************** * bitstream * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -30,14 +30,15 @@ #include "compiler.h" /* UNLIKELY() */ #include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ +#include "bits.h" /* ZSTD_highbit32 */ /*========================================= * Target specific =========================================*/ #ifndef ZSTD_NO_INTRINSICS -# if defined(__BMI__) && defined(__GNUC__) -# include <immintrin.h> /* support for bextr (experimental) */ +# if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__) +# include <immintrin.h> /* support for bextr (experimental)/bzhi */ # elif defined(__ICCARM__) # include <intrinsics.h> # endif @@ -132,48 +133,6 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ - - -/*-************************************************************** -* Internal functions -****************************************************************/ -MEM_STATIC unsigned BIT_highbit32 (U32 val) -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ -# if STATIC_BMI2 == 1 - return _lzcnt_u32(val) ^ 31; -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return 31 - __CLZ(val); -# else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; -# endif - } -} - /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, @@ -203,6 +162,16 @@ return 0; } +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ +#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS) + return _bzhi_u64(bitContainer, nbBits); +#else + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; +#endif +} + /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! */ @@ -212,7 +181,7 @@ DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); - bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitContainer |= BIT_getLowerBits(value, nbBits) << bitC->bitPos; bitC->bitPos += nbBits; } @@ -291,7 +260,7 @@ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + bitD->bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->ptr = bitD->start; @@ -319,7 +288,7 @@ default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ?
8 - ZSTD_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; @@ -350,16 +319,6 @@ #endif } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) -{ -#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 - return _bzhi_u64(bitContainer, nbBits); -#else - assert(nbBits < BIT_MASK_SIZE); - return bitContainer & BIT_mask[nbBits]; -#endif -} - /*! BIT_lookBits() : * Provides next n bits from local register. * local register is not modified. @@ -406,7 +365,7 @@ } /*! BIT_readBitsFast() : - * unsafe version; only works only if nbBits >= 1 */ + * unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); @@ -437,7 +396,7 @@ * This function is safe, it guarantees it will not read beyond src buffer. * @return : status of `BIT_DStream_t` internal register. * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ -MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +MEM_STATIC FORCE_INLINE_ATTR BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ return BIT_DStream_overflow; diff --git a/sys/contrib/zstd/lib/common/compiler.h b/sys/contrib/zstd/lib/common/compiler.h --- a/sys/contrib/zstd/lib/common/compiler.h +++ b/sys/contrib/zstd/lib/common/compiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -165,6 +165,12 @@ #define UNLIKELY(x) (x) #endif +#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) +# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); } +#else +# define ZSTD_UNREACHABLE { assert(0); } +#endif + /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include <intrin.h> /* For Visual 2005 */ @@ -181,6 +187,8 @@ # ifdef __AVX2__ //MSVC does not have a BMI2 specific flag, but every CPU that supports AVX2 also supports BMI2 # define STATIC_BMI2 1 # endif +# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__) +# define STATIC_BMI2 1 # endif #endif @@ -273,7 +281,18 @@ * Sanitizer *****************************************************************/ -#if ZSTD_MEMORY_SANITIZER +/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an + * abundance of caution, disable our custom poisoning on mingw. */ +#ifdef __MINGW32__ +#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE +#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1 +#endif +#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE +#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1 +#endif +#endif + +#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ @@ -292,9 +311,13 @@ /* Returns the offset of the first (at least partially) poisoned byte in the memory range, or -1 if the whole range is good. */ intptr_t __msan_test_shadow(const volatile void *x, size_t size); + +/* Print shadow and origin for the memory range to stderr in a human-readable + format.
*/ +void __msan_print_shadow(const volatile void *x, size_t size); #endif -#if ZSTD_ADDRESS_SANITIZER +#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Not all platforms that support asan provide sanitizers/asan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to * include the header file... */ diff --git a/sys/contrib/zstd/lib/common/cpu.h b/sys/contrib/zstd/lib/common/cpu.h --- a/sys/contrib/zstd/lib/common/cpu.h +++ b/sys/contrib/zstd/lib/common/cpu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/common/debug.h b/sys/contrib/zstd/lib/common/debug.h --- a/sys/contrib/zstd/lib/common/debug.h +++ b/sys/contrib/zstd/lib/common/debug.h @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/sys/contrib/zstd/lib/common/debug.c b/sys/contrib/zstd/lib/common/debug.c --- a/sys/contrib/zstd/lib/common/debug.c +++ b/sys/contrib/zstd/lib/common/debug.c @@ -1,7 +1,7 @@ /* ****************************************************************** * debug * Part of FSE library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/sys/contrib/zstd/lib/common/entropy_common.c b/sys/contrib/zstd/lib/common/entropy_common.c --- a/sys/contrib/zstd/lib/common/entropy_common.c +++ b/sys/contrib/zstd/lib/common/entropy_common.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Common functions of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,8 +19,8 @@ #include "error_private.h" /* ERR_*, ERROR */ #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ #include "fse.h" -#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ #include "huf.h" +#include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */ /*=== Version ===*/ @@ -38,34 +38,6 @@ /*-************************************************************** * FSE NCount encoding-decoding ****************************************************************/ -static U32 FSE_ctz(U32 val) -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ - if (val != 0) { - unsigned long r; - _BitScanForward(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ - return __builtin_ctz(val); -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return __CTZ(val); -# else /* Software version */ - U32 count = 0; - while ((val & 1) == 0) { - val >>= 1; - ++count; - } - return count; -# endif - } -} - FORCE_INLINE_TEMPLATE size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) @@ -113,7 +85,7 @@ * repeat. * Avoid UB by setting the high bit to 1. 
*/ - int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1; + int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; while (repeats >= 12) { charnum += 3 * 12; if (LIKELY(ip <= iend-7)) { @@ -124,7 +96,7 @@ ip = iend - 4; } bitStream = MEM_readLE32(ip) >> bitCount; - repeats = FSE_ctz(~bitStream | 0x80000000) >> 1; + repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1; } charnum += 3 * repeats; bitStream >>= 2 * repeats; @@ -189,7 +161,7 @@ * know that threshold > 1. */ if (remaining <= 1) break; - nbBits = BIT_highbit32(remaining) + 1; + nbBits = ZSTD_highbit32(remaining) + 1; threshold = 1 << (nbBits - 1); } if (charnum >= maxSV1) break; @@ -264,7 +236,7 @@ const void* src, size_t srcSize) { U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; - return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0); + return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0); } FORCE_INLINE_TEMPLATE size_t @@ -312,14 +284,14 @@ if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ - { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + { U32 const tableLog = ZSTD_highbit32(weightTotal) + 1; if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ { U32 const total = 1 << tableLog; U32 const rest = total - weightTotal; - U32 const verif = 1 << BIT_highbit32(rest); - U32 const lastWeight = BIT_highbit32(rest) + 1; + U32 const verif = 1 << ZSTD_highbit32(rest); + U32 const lastWeight = ZSTD_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; @@ -356,13 +328,13 @@ U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, - int bmi2) + int flags) { #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } #endif - (void)bmi2; + (void)flags; return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } diff --git a/sys/contrib/zstd/lib/common/error_private.h b/sys/contrib/zstd/lib/common/error_private.h --- a/sys/contrib/zstd/lib/common/error_private.h +++ b/sys/contrib/zstd/lib/common/error_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/common/error_private.c b/sys/contrib/zstd/lib/common/error_private.c --- a/sys/contrib/zstd/lib/common/error_private.c +++ b/sys/contrib/zstd/lib/common/error_private.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -27,9 +27,11 @@ case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; - case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(corruption_detected): return "Data corruption detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(literals_headerWrong): return "Header of Literals' block doesn't respect format specification"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_combination_unsupported): return "Unsupported combination of parameters"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; @@ -38,17 +40,22 @@ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; + case PREFIX(noForwardProgress_destFull): return "Operation made no progress over multiple calls, due to output buffer being full"; + case PREFIX(noForwardProgress_inputEmpty): return "Operation made no progress over multiple calls, due to input being empty"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; + case PREFIX(sequenceProducer_failed): return "Block-level external sequence producer returned an error code"; + case PREFIX(externalSequences_invalid): return "External sequences are not valid"; case PREFIX(maxCode): default: return notErrorCode; } diff --git a/sys/contrib/zstd/lib/common/fse.h b/sys/contrib/zstd/lib/common/fse.h --- a/sys/contrib/zstd/lib/common/fse.h +++ b/sys/contrib/zstd/lib/common/fse.h @@ -1,7 +1,7 @@ /* ****************************************************************** * FSE : Finite State Entropy codec * Public Prototypes declaration - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -53,34 +53,6 @@ FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ -/*-**************************************** -* FSE simple functions -******************************************/ -/*! 
FSE_compress() : - Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. - 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). - @return : size of compressed data (<= dstCapacity). - Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. - if FSE_isError(return), compression failed (more details using FSE_getErrorName()) -*/ -FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/*! FSE_decompress(): - Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', - into already allocated destination buffer 'dst', of size 'dstCapacity'. - @return : size of regenerated data (<= maxDstSize), - or an error code, which can be tested using FSE_isError() . - - ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! - Why ? : making this distinction requires a header. - Header management is intentionally delegated to the user layer, which can better manage special cases. -*/ -FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, - const void* cSrc, size_t cSrcSize); - - /*-***************************************** * Tool functions ******************************************/ @@ -91,20 +63,6 @@ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ -/*-***************************************** -* FSE advanced functions -******************************************/ -/*! FSE_compress2() : - Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' - Both parameters can be defined as '0' to mean : use default value - @return : size of compressed data - Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! - if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. - if FSE_isError(return), it's an error code. -*/ -FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); - - /*-***************************************** * FSE detailed API ******************************************/ @@ -164,8 +122,6 @@ /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ -FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); -FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @@ -241,23 +197,7 @@ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize, int bmi2); -/*! Constructor and Destructor of FSE_DTable. - Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); -FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); - -/*! FSE_buildDTable(): - Builds 'dt', which must be already allocated, using FSE_createDTable(). 
- return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); - -/*! FSE_decompress_usingDTable(): - Decompress compressed source `cSrc` of size `cSrcSize` using `dt` - into `dst` which must be already allocated. - @return : size of regenerated data (necessarily <= `dstCapacity`), - or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : @@ -320,16 +260,6 @@ unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); /**< same as FSE_optimalTableLog(), which used `minus==2` */ -/* FSE_compress_wksp() : - * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). - * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. - */ -#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) -size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); - -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ - size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /**< build a fake FSE_CTable, designed to compress always the same symbolValue */ @@ -347,19 +277,11 @@ FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits); -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ - -size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue); -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ - -#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1) #define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned)) -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize); -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */ - size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); -/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. 
*/ +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`. + * Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */ typedef enum { FSE_repeat_none, /**< Cannot use the previous table */ @@ -555,7 +477,7 @@ /* FSE_getMaxNbBits() : * Approximate maximum cost of a symbol, in bits. - * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) + * Fractional get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) diff --git a/sys/contrib/zstd/lib/common/fse_decompress.c b/sys/contrib/zstd/lib/common/fse_decompress.c --- a/sys/contrib/zstd/lib/common/fse_decompress.c +++ b/sys/contrib/zstd/lib/common/fse_decompress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy decoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -24,6 +24,7 @@ #include "error_private.h" #define ZSTD_DEPS_NEED_MALLOC #include "zstd_deps.h" +#include "bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -55,19 +56,6 @@ #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) - -/* Function templates */ -FSE_DTable* FSE_createDTable (unsigned tableLog) -{ - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); -} - -void FSE_freeDTable (FSE_DTable* dt) -{ - ZSTD_free(dt); -} - static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) { void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ @@ -127,10 +115,10 @@ } } /* Now we spread those positions across the table. - * The benefit of doing it in two stages is that we avoid the the + * The benefit of doing it in two stages is that we avoid the * variable size inner loop, which caused lots of branch misses. * Now we can run through all the positions without any branch misses. - * We unroll the loop twice, since that is what emperically worked best. + * We unroll the loop twice, since that is what empirically worked best. 
*/ { size_t position = 0; @@ -166,7 +154,7 @@ for (u=0; utableLog = 0; - DTableH->fastMode = 0; - - cell->newState = 0; - cell->symbol = symbolValue; - cell->nbBits = 0; - - return 0; -} - - -size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) -{ - void* ptr = dt; - FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; - void* dPtr = dt + 1; - FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSV1 = tableMask+1; - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* Build Decoding Table */ - DTableH->tableLog = (U16)nbBits; - DTableH->fastMode = 1; - for (s=0; sfastMode; - - /* select fast mode (static) */ - if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); - return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); -} - - -size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) -{ - return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0); -} - typedef struct { short ncount[FSE_MAX_SYMBOL_VALUE + 1]; FSE_DTable dtable[1]; /* Dynamically sized */ @@ -342,7 +267,8 @@ } if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge); - workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog); + assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize); + workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog); CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) ); @@ -382,22 +308,4 @@ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } - -typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { - U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)]; - return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp)); -} - -size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) -{ - /* Static analyzer seems unable to understand this table will be properly initialized later */ - U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp)); -} -#endif - - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/sys/contrib/zstd/lib/common/huf.h b/sys/contrib/zstd/lib/common/huf.h --- a/sys/contrib/zstd/lib/common/huf.h +++ b/sys/contrib/zstd/lib/common/huf.h @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman codec, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. 
* * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -21,99 +21,22 @@ /* *** Dependencies *** */ #include "zstd_deps.h" /* size_t */ - - -/* *** library symbols visibility *** */ -/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, - * HUF symbols remain "private" (internal symbols for library only). - * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define HUF_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ -#else -# define HUF_PUBLIC_API -#endif - - -/* ========================== */ -/* *** simple functions *** */ -/* ========================== */ - -/** HUF_compress() : - * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. - * 'dst' buffer must be already allocated. - * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). - * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. - * @return : size of compressed data (<= `dstCapacity`). - * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) - */ -HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/** HUF_decompress() : - * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', - * into already allocated buffer 'dst', of minimum size 'dstSize'. - * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. - * Note : in contrast with FSE, HUF_decompress can regenerate - * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, - * because it knows size to regenerate (originalSize). - * @return : size of regenerated data (== originalSize), - * or an error code, which can be tested using HUF_isError() - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); +#include "mem.h" /* U32 */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ -/* *** Advanced function *** */ - -/** HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. - * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . 
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ -HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog); - -/** HUF_compress4X_wksp() : - * Same as HUF_compress2(), but uses externally allocated `workSpace`. - * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) -HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize); - -#endif /* HUF_H_298734234 */ - -/* ****************************************************************** - * WARNING !! - * The following section contains advanced and experimental definitions - * which shall never be used in the context of a dynamic library, - * because they are not guaranteed to remain stable in the future. - * Only consider them in association with static linking. - * *****************************************************************/ -#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) -#define HUF_H_HUF_STATIC_LINKING_ONLY - -/* *** Dependencies *** */ -#include "mem.h" /* U32 */ -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" - /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ @@ -154,25 +77,49 @@ /* **************************************** * Advanced decompression functions ******************************************/ -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -#endif -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +/** + * Huffman flags bitset. + * For all flags, 0 is the default value. + */ +typedef enum { + /** + * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime. + * Otherwise: Ignored. + */ + HUF_flags_bmi2 = (1 << 0), + /** + * If set: Test possible table depths to find the one that produces the smallest header + encoded size. 
+ * If unset: Use heuristic to find the table depth. + */ + HUF_flags_optimalDepth = (1 << 1), + /** + * If set: If the previous table can encode the input, always reuse the previous table. + * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output. + */ + HUF_flags_preferRepeat = (1 << 2), + /** + * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress. + * If unset: Always histogram the entire input. + */ + HUF_flags_suspectUncompressible = (1 << 3), + /** + * If set: Don't use assembly implementations + * If unset: Allow using assembly implementations + */ + HUF_flags_disableAsm = (1 << 4), + /** + * If set: Don't use the fast decoding loop, always use the fallback decoding loop. + * If unset: Use the fast decoding loop when possible. + */ + HUF_flags_disableFast = (1 << 5) +} HUF_flags_e; /* **************************************** * HUF detailed API * ****************************************/ +#define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") @@ -185,12 +132,12 @@ * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ -size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +unsigned HUF_minTableLog(unsigned symbolCardinality); +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue); +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace, + size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); @@ -199,6 +146,7 @@ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ } HUF_repeat; + /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. 
@@ -209,13 +157,13 @@ const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, @@ -241,7 +189,7 @@ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workspace, size_t wkspSize, - int bmi2); + int flags); /** HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ @@ -279,32 +227,12 @@ #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif - /* ====================== */ /* single stream variants */ /* ====================== */ -size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. 
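For illustration only (not part of this patch): a minimal caller-side sketch of how the former preferRepeat/bmi2/suspectUncompressible arguments could be folded into the single `flags` bitset introduced by HUF_flags_e above. The wrapper name and parameter names are hypothetical; types and the HUF_compress4X_repeat() prototype are assumed from the huf.h hunks shown here.

/* Hypothetical adaptation of an old-style call site to the new flags API. */
static size_t compressWithFlags(void* dst, size_t dstSize,
                                const void* src, size_t srcSize,
                                unsigned maxSymbolValue, unsigned tableLog,
                                void* workSpace, size_t wkspSize,
                                HUF_CElt* hufTable, HUF_repeat* repeat,
                                int bmi2, int preferRepeat, int suspectUncompressible)
{
    int flags = 0;
    if (bmi2)                  flags |= HUF_flags_bmi2;
    if (preferRepeat)          flags |= HUF_flags_preferRepeat;
    if (suspectUncompressible) flags |= HUF_flags_suspectUncompressible;
    /* Other HUF_flags_e bits (optimalDepth, disableAsm, disableFast) default to 0. */
    return HUF_compress4X_repeat(dst, dstSize, src, srcSize,
                                 maxSymbolValue, tableLog,
                                 workSpace, wkspSize,
                                 hufTable, repeat, flags);
}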
@@ -315,49 +243,30 @@ const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ -#endif - -size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); -size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif + HUF_CElt* hufTable, HUF_repeat* repeat, int flags); -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */ #endif /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
*/ -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags); +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags); #endif -#endif /* HUF_STATIC_LINKING_ONLY */ +#endif /* HUF_H_298734234 */ #if defined (__cplusplus) } diff --git a/sys/contrib/zstd/lib/common/mem.h b/sys/contrib/zstd/lib/common/mem.h --- a/sys/contrib/zstd/lib/common/mem.h +++ b/sys/contrib/zstd/lib/common/mem.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -133,21 +133,15 @@ /*-************************************************************** * Memory I/O Implementation *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. +/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory: + * Method 0 : always use `memcpy()`. Safe and portable. + * Method 1 : Use compiler extension to set unaligned access. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (i.e. 
GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) + * Default : method 1 if supported, else method 0 */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) +# ifdef __GNUC__ # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -190,30 +184,19 @@ #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) - __pragma( pack(push, 1) ) - typedef struct { U16 v; } unalign16; - typedef struct { U32 v; } unalign32; - typedef struct { U64 v; } unalign64; - typedef struct { size_t v; } unalignArch; - __pragma( pack(pop) ) -#else - typedef struct { U16 v; } __attribute__((packed)) unalign16; - typedef struct { U32 v; } __attribute__((packed)) unalign32; - typedef struct { U64 v; } __attribute__((packed)) unalign64; - typedef struct { size_t v; } __attribute__((packed)) unalignArch; -#endif +typedef __attribute__((aligned(1))) U16 unalign16; +typedef __attribute__((aligned(1))) U32 unalign32; +typedef __attribute__((aligned(1))) U64 unalign64; +typedef __attribute__((aligned(1))) size_t unalignArch; -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } +MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; } -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; } #else @@ -257,6 +240,14 @@ #endif /* MEM_FORCE_MEMORY_ACCESS */ +MEM_STATIC U32 MEM_swap32_fallback(U32 in) +{ + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +} + MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -265,22 +256,13 @@ || (defined(__clang__) && __has_builtin(__builtin_bswap32)) return __builtin_bswap32(in); #else - return ((in << 24) & 0xff000000 ) | - ((in << 8) & 0x00ff0000 ) | - ((in >> 8) & 0x0000ff00 ) | - ((in >> 24) & 0x000000ff ); + return MEM_swap32_fallback(in); #endif } -MEM_STATIC U64 MEM_swap64(U64 in) +MEM_STATIC U64 MEM_swap64_fallback(U64 in) { -#if defined(_MSC_VER) /* Visual Studio */ - return _byteswap_uint64(in); -#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ - || (defined(__clang__) && 
__has_builtin(__builtin_bswap64)) - return __builtin_bswap64(in); -#else - return ((in << 56) & 0xff00000000000000ULL) | + return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | @@ -288,6 +270,17 @@ ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ + || (defined(__clang__) && __has_builtin(__builtin_bswap64)) + return __builtin_bswap64(in); +#else + return MEM_swap64_fallback(in); #endif } diff --git a/sys/contrib/zstd/lib/common/pool.h b/sys/contrib/zstd/lib/common/pool.h --- a/sys/contrib/zstd/lib/common/pool.h +++ b/sys/contrib/zstd/lib/common/pool.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -38,6 +38,12 @@ */ void POOL_free(POOL_ctx* ctx); + +/*! POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ +void POOL_joinJobs(POOL_ctx* ctx); + /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, diff --git a/sys/contrib/zstd/lib/common/pool.c b/sys/contrib/zstd/lib/common/pool.c --- a/sys/contrib/zstd/lib/common/pool.c +++ b/sys/contrib/zstd/lib/common/pool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -10,9 +10,9 @@ /* ====== Dependencies ======= */ +#include "../common/allocations.h" /* ZSTD_customCalloc, ZSTD_customFree */ #include "zstd_deps.h" /* size_t */ #include "debug.h" /* assert */ -#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "pool.h" /* ====== Compiler specifics ====== */ @@ -96,9 +96,7 @@ /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; - if (ctx->queueSize == 1) { - ZSTD_pthread_cond_signal(&ctx->queuePushCond); - } + ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } } /* for (;;) */ @@ -128,7 +126,7 @@ * empty and full queues. */ ctx->queueSize = queueSize + 1; - ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; @@ -142,7 +140,7 @@ } ctx->shutdown = 0; /* Allocate space for the thread handles */ - ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; ctx->customMem = customMem; /* Check for errors */ @@ -175,7 +173,7 @@ /* Join all of the threads */ { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { - ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ + ZSTD_pthread_join(ctx->threads[i]); /* note : could fail */ } } } @@ -190,6 +188,17 @@ ZSTD_customFree(ctx, ctx->customMem); } +/*! 
POOL_joinJobs() : + * Waits for all queued jobs to finish executing. + */ +void POOL_joinJobs(POOL_ctx* ctx) { + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} + void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { POOL_free (pool); } @@ -211,7 +220,7 @@ return 0; } /* numThreads > threadCapacity */ - { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); + { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); if (!threadPool) return 1; /* replace existing thread pool */ ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); @@ -262,7 +271,9 @@ static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { - POOL_job const job = {function, opaque}; + POOL_job job; + job.function = function; + job.opaque = opaque; assert(ctx != NULL); if (ctx->shutdown) return; @@ -330,6 +341,11 @@ (void)ctx; } +void POOL_joinJobs(POOL_ctx* ctx){ + assert(!ctx || ctx == &g_poolCtx); + (void)ctx; +} + int POOL_resize(POOL_ctx* ctx, size_t numThreads) { (void)ctx; (void)numThreads; return 0; diff --git a/sys/contrib/zstd/lib/common/portability_macros.h b/sys/contrib/zstd/lib/common/portability_macros.h --- a/sys/contrib/zstd/lib/common/portability_macros.h +++ b/sys/contrib/zstd/lib/common/portability_macros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,7 @@ #define ZSTD_PORTABILITY_MACROS_H /** - * This header file contains macro defintions to support portability. + * This header file contains macro definitions to support portability. * This header is shared between C and ASM code, so it MUST only * contain macro definitions. It MUST not contain any C code. * @@ -88,7 +88,7 @@ #endif /** - * Only enable assembly for GNUC comptabile compilers, + * Only enable assembly for GNUC compatible compilers, * because other platforms may not support GAS assembly syntax. * * Only enable assembly for Linux / MacOS, other platforms may @@ -134,4 +134,23 @@ # define ZSTD_ENABLE_ASM_X86_64_BMI2 0 #endif +/* + * For x86 ELF targets, add .note.gnu.property section for Intel CET in + * assembly sources when CET is enabled. + * + * Additionally, any function that may be called indirectly must begin + * with ZSTD_CET_ENDBRANCH. 
+ */
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
+    && defined(__has_include)
+# if __has_include(<cet.h>)
+#  include <cet.h>
+#  define ZSTD_CET_ENDBRANCH _CET_ENDBR
+# endif
+#endif
+
+#ifndef ZSTD_CET_ENDBRANCH
+# define ZSTD_CET_ENDBRANCH
+#endif
+
 #endif /* ZSTD_PORTABILITY_MACROS_H */
diff --git a/sys/contrib/zstd/lib/common/threading.h b/sys/contrib/zstd/lib/common/threading.h
--- a/sys/contrib/zstd/lib/common/threading.h
+++ b/sys/contrib/zstd/lib/common/threading.h
@@ -23,8 +23,7 @@
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 /**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ * Windows minimalist Pthread Wrapper
  */
 #ifdef WINVER
 # undef WINVER
@@ -62,16 +61,12 @@
 #define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
 /* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef struct {
-    HANDLE handle;
-    void* (*start_routine)(void*);
-    void* arg;
-} ZSTD_pthread_t;
+typedef HANDLE ZSTD_pthread_t;
 int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                    void* (*start_routine) (void*), void* arg);
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
+int ZSTD_pthread_join(ZSTD_pthread_t thread);
 /**
  * add here more wrappers as required
@@ -99,7 +94,7 @@
 #define ZSTD_pthread_t pthread_t
 #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+#define ZSTD_pthread_join(a) pthread_join((a),NULL)
 #else /* DEBUGLEVEL >= 1 */
@@ -124,7 +119,7 @@
 #define ZSTD_pthread_t pthread_t
 #define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+#define ZSTD_pthread_join(a) pthread_join((a),NULL)
 #endif
diff --git a/sys/contrib/zstd/lib/common/threading.c b/sys/contrib/zstd/lib/common/threading.c
--- a/sys/contrib/zstd/lib/common/threading.c
+++ b/sys/contrib/zstd/lib/common/threading.c
@@ -23,8 +23,7 @@
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
 /**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ * Windows minimalist Pthread Wrapper
  */
@@ -35,37 +34,92 @@
 /* === Implementation === */
+typedef struct {
+    void* (*start_routine)(void*);
+    void* arg;
+    int initialized;
+    ZSTD_pthread_cond_t initialized_cond;
+    ZSTD_pthread_mutex_t initialized_mutex;
+} ZSTD_thread_params_t;
+
 static unsigned __stdcall worker(void *arg)
 {
-    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
-    thread->arg = thread->start_routine(thread->arg);
+    void* (*start_routine)(void*);
+    void* thread_arg;
+
+    /* Initialized thread_arg and start_routine and signal main thread that we don't need it
+     * to wait any longer.
+ */ + { + ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg; + thread_arg = thread_param->arg; + start_routine = thread_param->start_routine; + + /* Signal main thread that we are running and do not depend on its memory anymore */ + ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex); + thread_param->initialized = 1; + ZSTD_pthread_cond_signal(&thread_param->initialized_cond); + ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex); + } + + start_routine(thread_arg); + return 0; } int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg) { + ZSTD_thread_params_t thread_param; (void)unused; - thread->arg = arg; - thread->start_routine = start_routine; - thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); - if (!thread->handle) + thread_param.start_routine = start_routine; + thread_param.arg = arg; + thread_param.initialized = 0; + *thread = NULL; + + /* Setup thread initialization synchronization */ + if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) { + /* Should never happen on Windows */ + return -1; + } + if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) { + /* Should never happen on Windows */ + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + return -1; + } + + /* Spawn thread */ + *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL); + if (!thread) { + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); return errno; - else - return 0; + } + + /* Wait for thread to be initialized */ + ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex); + while(!thread_param.initialized) { + ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex); + } + ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex); + ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex); + ZSTD_pthread_cond_destroy(&thread_param.initialized_cond); + + return 0; } -int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +int ZSTD_pthread_join(ZSTD_pthread_t thread) { DWORD result; - if (!thread.handle) return 0; + if (!thread) return 0; + + result = WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); - result = WaitForSingleObject(thread.handle, INFINITE); switch (result) { case WAIT_OBJECT_0: - if (value_ptr) *value_ptr = thread.arg; return 0; case WAIT_ABANDONED: return EINVAL; diff --git a/sys/contrib/zstd/lib/common/xxhash.h b/sys/contrib/zstd/lib/common/xxhash.h --- a/sys/contrib/zstd/lib/common/xxhash.h +++ b/sys/contrib/zstd/lib/common/xxhash.h @@ -1,9 +1,9 @@ /* * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the @@ -695,9 +695,6 @@ * @see * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. */ -/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). 
*/ -__attribute__((visibility ("default"))) -/* End FreeBSD */ XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed); /******* Streaming *******/ @@ -1317,7 +1314,7 @@ * care, as what works on one compiler/platform/optimization level may cause * another to read garbage data or even crash. * - * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. * * Prefer these methods in priority order (0 > 3 > 1 > 2) */ @@ -1537,7 +1534,7 @@ * @brief Used to prevent unwanted optimizations for @p var. * * It uses an empty GCC inline assembly statement with a register constraint - * which forces @p var into a general purpose register (eg eax, ebx, ecx + * which forces @p var into a general purpose register (e.g. eax, ebx, ecx * on x86) and marks it as modified. * * This is used in a few places to avoid unwanted autovectorization (e.g. @@ -1658,7 +1655,7 @@ /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u32 XXH_read32(const void* memPtr) { @@ -2299,7 +2296,7 @@ /* * Portable and safe solution. Generally efficient. - * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u64 XXH_read64(const void* memPtr) { @@ -2812,7 +2809,7 @@ * @ingroup tuning * @brief Selects the minimum alignment for XXH3's accumulators. * - * When using SIMD, this should match the alignment reqired for said vector + * When using SIMD, this should match the alignment required for said vector * type, so, for example, 32 for AVX2. * * Default: Auto detected. @@ -3029,7 +3026,7 @@ * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions, * you are only using 2/3 of the CPU bandwidth. * - * This is even more noticable on the more advanced cores like the A76 which + * This is even more noticeable on the more advanced cores like the A76 which * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. * * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the diff --git a/sys/contrib/zstd/lib/common/xxhash.c b/sys/contrib/zstd/lib/common/xxhash.c --- a/sys/contrib/zstd/lib/common/xxhash.c +++ b/sys/contrib/zstd/lib/common/xxhash.c @@ -1,9 +1,9 @@ /* * xxHash - Fast Hash algorithm - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : - * - xxHash homepage: http://www.xxhash.com + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository : https://github.com/Cyan4973/xxHash * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/common/zstd_common.c b/sys/contrib/zstd/lib/common/zstd_common.c --- a/sys/contrib/zstd/lib/common/zstd_common.c +++ b/sys/contrib/zstd/lib/common/zstd_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,6 @@ * Dependencies ***************************************/ #define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ #include "error_private.h" #include "zstd_internal.h" @@ -47,37 +46,3 @@ /*! ZSTD_getErrorString() : * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } - - - -/*=************************************************************** -* Custom allocator -****************************************************************/ -void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) -{ - if (customMem.customAlloc) - return customMem.customAlloc(customMem.opaque, size); - return ZSTD_malloc(size); -} - -void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) -{ - if (customMem.customAlloc) { - /* calloc implemented as malloc+memset; - * not as efficient as calloc, but next best guess for custom malloc */ - void* const ptr = customMem.customAlloc(customMem.opaque, size); - ZSTD_memset(ptr, 0, size); - return ptr; - } - return ZSTD_calloc(1, size); -} - -void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) -{ - if (ptr!=NULL) { - if (customMem.customFree) - customMem.customFree(customMem.opaque, ptr); - else - ZSTD_free(ptr); - } -} diff --git a/sys/contrib/zstd/lib/common/zstd_deps.h b/sys/contrib/zstd/lib/common/zstd_deps.h --- a/sys/contrib/zstd/lib/common/zstd_deps.h +++ b/sys/contrib/zstd/lib/common/zstd_deps.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/common/zstd_internal.h b/sys/contrib/zstd/lib/common/zstd_internal.h --- a/sys/contrib/zstd/lib/common/zstd_internal.h +++ b/sys/contrib/zstd/lib/common/zstd_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,6 @@ #include "../zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" -#define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ @@ -93,9 +92,9 @@ #define ZSTD_FRAMECHECKSUMSIZE 4 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ +#define MIN_LITERALS_FOR_4_STREAMS 6 -#define HufLog 12 typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; #define LONGNBSEQ 0x7F00 @@ -103,6 +102,7 @@ #define MINMATCH 3 #define Litbits 8 +#define LitHufLog 11 #define MaxLit ((1<= length) return; op += 16; @@ -250,7 +246,6 @@ COPY16(op, ip); } while (op < oend); -#endif } } @@ -299,11 +294,11 @@ typedef struct { seqDef* sequencesStart; seqDef* sequences; /* ptr to end of sequences */ - BYTE* litStart; - BYTE* lit; /* ptr to end of literals */ - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; + BYTE* litStart; + BYTE* lit; /* ptr to end of literals */ + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; size_t maxNbSeq; size_t maxNbLit; @@ -311,8 +306,8 @@ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment * the existing value of the litLength or matchLength by 0x10000. */ - ZSTD_longLengthType_e longLengthType; - U32 longLengthPos; /* Index of the sequence to apply long length modification to */ + ZSTD_longLengthType_e longLengthType; + U32 longLengthPos; /* Index of the sequence to apply long length modification to */ } seqStore_t; typedef struct { @@ -331,10 +326,10 @@ seqLen.matchLength = seq->mlBase + MINMATCH; if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { if (seqStore->longLengthType == ZSTD_llt_literalLength) { - seqLen.litLength += 0xFFFF; + seqLen.litLength += 0x10000; } if (seqStore->longLengthType == ZSTD_llt_matchLength) { - seqLen.matchLength += 0xFFFF; + seqLen.matchLength += 0x10000; } } return seqLen; @@ -347,109 +342,13 @@ * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { + size_t nbBlocks; size_t compressedSize; unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - -/* custom memory allocation functions */ -void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem); -void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem); -void ZSTD_customFree(void* ptr, ZSTD_customMem customMem); - - -MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ -{ - assert(val != 0); - { -# if defined(_MSC_VER) /* Visual */ -# if STATIC_BMI2 == 1 - return _lzcnt_u32(val)^31; -# else - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ - return __builtin_clz (val) ^ 31; -# elif defined(__ICCARM__) /* IAR Intrinsic */ - return 31 - __CLZ(val); -# else /* Software version */ - static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 
30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; -# endif - } -} - -/** - * Counts the number of trailing zeros of a `size_t`. - * Most compilers should support CTZ as a builtin. A backup - * implementation is provided if the builtin isn't supported, but - * it may not be terribly efficient. - */ -MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val) -{ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _tzcnt_u64(val); -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return __builtin_ctzll((U64)val); -# else - static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19, - 4, 25, 14, 28, 9, 34, 20, 56, - 5, 17, 26, 54, 15, 41, 29, 43, - 10, 31, 38, 35, 21, 45, 49, 57, - 63, 6, 12, 18, 24, 27, 33, 55, - 16, 53, 40, 42, 30, 37, 44, 48, - 62, 11, 23, 32, 52, 39, 36, 47, - 61, 22, 51, 46, 60, 50, 59, 58 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)r; - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return __builtin_ctz((U32)val); -# else - static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3, - 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, - 26, 12, 18, 6, 11, 5, 10, 9 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } -} +int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ /* ZSTD_invalidateRepCodes() : diff --git a/sys/contrib/zstd/lib/common/zstd_trace.h b/sys/contrib/zstd/lib/common/zstd_trace.h --- a/sys/contrib/zstd/lib/common/zstd_trace.h +++ b/sys/contrib/zstd/lib/common/zstd_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -21,13 +21,13 @@ * For now, enable conservatively: * - Only GNUC * - Only ELF - * - Only x86-64 and i386 + * - Only x86-64, i386 and aarch64 * Also, explicitly disable on platforms known not to work so they aren't * forgotten in the future. */ #if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \ defined(__GNUC__) && defined(__ELF__) && \ - (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \ + (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ !defined(__CYGWIN__) && !defined(_AIX) # define ZSTD_HAVE_WEAK_SYMBOLS 1 diff --git a/sys/contrib/zstd/lib/compress/clevels.h b/sys/contrib/zstd/lib/compress/clevels.h --- a/sys/contrib/zstd/lib/compress/clevels.h +++ b/sys/contrib/zstd/lib/compress/clevels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/fse_compress.c b/sys/contrib/zstd/lib/compress/fse_compress.c --- a/sys/contrib/zstd/lib/compress/fse_compress.c +++ b/sys/contrib/zstd/lib/compress/fse_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * FSE : Finite State Entropy encoder - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -26,6 +26,7 @@ #define ZSTD_DEPS_NEED_MALLOC #define ZSTD_DEPS_NEED_MATH64 #include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */ +#include "../common/bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -90,7 +91,7 @@ assert(tableLog < 16); /* required for threshold strategy to work */ /* For explanations on how to distribute symbol values over the table : - * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + * https://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ #ifdef __clang_analyzer__ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ @@ -191,7 +192,7 @@ break; default : assert(normalizedCounter[s] > 1); - { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1); + { U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1); U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut; symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]); @@ -342,21 +343,11 @@ * FSE Compression Code ****************************************************************/ -FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) -{ - size_t size; - if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; - size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); - return (FSE_CTable*)ZSTD_malloc(size); -} - -void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); } - /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { - U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; - U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBitsSrc = ZSTD_highbit32((U32)(srcSize)) + 1; + U32 minBitsSymbols = ZSTD_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? 
minBitsSrc : minBitsSymbols; assert(srcSize > 1); /* Not supported, RLE should be used instead */ return minBits; @@ -364,7 +355,7 @@ unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) { - U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 maxBitsSrc = ZSTD_highbit32((U32)(srcSize - 1)) - minus; U32 tableLog = maxTableLog; U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); assert(srcSize > 1); /* Not supported, RLE should be used instead */ @@ -532,40 +523,6 @@ return tableLog; } - -/* fake FSE_CTable, for raw (uncompressed) input */ -size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) -{ - const unsigned tableSize = 1 << nbBits; - const unsigned tableMask = tableSize - 1; - const unsigned maxSymbolValue = tableMask; - void* const ptr = ct; - U16* const tableU16 = ( (U16*) ptr) + 2; - void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */ - FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); - unsigned s; - - /* Sanity checks */ - if (nbBits < 1) return ERROR(GENERIC); /* min size */ - - /* header */ - tableU16[-2] = (U16) nbBits; - tableU16[-1] = (U16) maxSymbolValue; - - /* Build table */ - for (s=0; s not compressible */ - if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ - } - - tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); - CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) ); - - /* Write table description header */ - { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); - op += nc_err; - } - - /* Compress */ - CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); - if (cSize == 0) return 0; /* not enough space for compressed data */ - op += cSize; - } - - /* check compressibility */ - if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; - - return op-ostart; -} - -typedef struct { - FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; - union { - U32 hist_wksp[HIST_WKSP_SIZE_U32]; - BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; - } workspace; -} fseWkspMax_t; - -size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) -{ - fseWkspMax_t scratchBuffer; - DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ - if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); - return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); -} - -size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) -{ - return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); -} -#endif - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/sys/contrib/zstd/lib/compress/hist.h b/sys/contrib/zstd/lib/compress/hist.h --- a/sys/contrib/zstd/lib/compress/hist.h +++ b/sys/contrib/zstd/lib/compress/hist.h @@ -1,7 +1,7 @@ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, Inc. 
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/sys/contrib/zstd/lib/compress/hist.c b/sys/contrib/zstd/lib/compress/hist.c --- a/sys/contrib/zstd/lib/compress/hist.c +++ b/sys/contrib/zstd/lib/compress/hist.c @@ -1,7 +1,7 @@ /* ****************************************************************** * hist : Histogram functions * part of Finite State Entropy project - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy diff --git a/sys/contrib/zstd/lib/compress/huf_compress.c b/sys/contrib/zstd/lib/compress/huf_compress.c --- a/sys/contrib/zstd/lib/compress/huf_compress.c +++ b/sys/contrib/zstd/lib/compress/huf_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** * Huffman encoder, part of New Generation Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -29,9 +29,9 @@ #include "hist.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "../common/fse.h" /* header compression */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /* ************************************************************** @@ -42,13 +42,67 @@ /* ************************************************************** -* Utils +* Required declarations ****************************************************************/ -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + + +/* ************************************************************** +* Debug Traces +****************************************************************/ + +#if DEBUGLEVEL >= 2 + +static size_t showU32(const U32* arr, size_t size) { - return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + size_t u; + for (u=0; u= sizeof(HUF_WriteCTableWksp)); + /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); @@ -207,16 +266,6 @@ return ((maxSymbolValue+1)/2) + 1; } -/*! HUF_writeCTable() : - `CTable` : Huffman tree to save, using huf representation. 
-  @return : size of saved CTable */
-size_t HUF_writeCTable (void* dst, size_t maxDstSize,
-                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
-{
-    HUF_WriteCTableWksp wksp;
-    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
-}
-
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr,
                        const void* src, size_t srcSize,
                        unsigned* hasZeroWeights)
 {
@@ -272,68 +321,64 @@
 U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
 {
-    const HUF_CElt* ct = CTable + 1;
+    const HUF_CElt* const ct = CTable + 1;
     assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
     return (U32)HUF_getNbBits(ct[symbolValue]);
 }
-typedef struct nodeElt_s {
-    U32 count;
-    U16 parent;
-    BYTE byte;
-    BYTE nbBits;
-} nodeElt;
-
 /**
  * HUF_setMaxHeight():
- * Enforces maxNbBits on the Huffman tree described in huffNode.
+ * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
  *
- * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
- * the tree to so that it is a valid canonical Huffman tree.
+ * It attempts to convert all nodes with nbBits > @targetNbBits
+ * to employ @targetNbBits instead. Then it adjusts the tree
+ * so that it remains a valid canonical Huffman tree.
  *
  * @pre               The sum of the ranks of each symbol == 2^largestBits,
  *                    where largestBits == huffNode[lastNonNull].nbBits.
  * @post              The sum of the ranks of each symbol == 2^largestBits,
- *                    where largestBits is the return value <= maxNbBits.
+ *                    where largestBits is the return value (expected <= targetNbBits).
  *
- * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
+ * @param huffNode    The Huffman tree modified in place to enforce targetNbBits.
+ *                    It's presumed sorted, from most frequent to rarest symbol.
 * @param lastNonNull The symbol with the lowest count in the Huffman tree.
- * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
+ * @param targetNbBits The allowed number of bits, which the Huffman tree
  *                    may not respect. After this function the Huffman tree will
- *                    respect maxNbBits.
- * @return            The maximum number of bits of the Huffman tree after adjustment,
- *                    necessarily no more than maxNbBits.
+ *                    respect targetNbBits.
+ * @return            The maximum number of bits of the Huffman tree after adjustment.
  */
-static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
 {
     const U32 largestBits = huffNode[lastNonNull].nbBits;
-    /* early exit : no elt > maxNbBits, so the tree is already valid. */
-    if (largestBits <= maxNbBits) return largestBits;
+    /* early exit : no elt > targetNbBits, so the tree is already valid. */
+    if (largestBits <= targetNbBits) return largestBits;
+
+    DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
     /* there are several too large elements (at least >= 2) */
     {   int totalCost = 0;
-        const U32 baseCost = 1 << (largestBits - maxNbBits);
+        const U32 baseCost = 1 << (largestBits - targetNbBits);
         int n = (int)lastNonNull;
-        /* Adjust any ranks > maxNbBits to maxNbBits.
+        /* Adjust any ranks > targetNbBits to targetNbBits.
          * Compute totalCost, which is how far the sum of the ranks is
         * we are over 2^largestBits after adjust the offending ranks.
*/ - while (huffNode[n].nbBits > maxNbBits) { + while (huffNode[n].nbBits > targetNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); - huffNode[n].nbBits = (BYTE)maxNbBits; + huffNode[n].nbBits = (BYTE)targetNbBits; n--; } - /* n stops at huffNode[n].nbBits <= maxNbBits */ - assert(huffNode[n].nbBits <= maxNbBits); - /* n end at index of smallest symbol using < maxNbBits */ - while (huffNode[n].nbBits == maxNbBits) --n; + /* n stops at huffNode[n].nbBits <= targetNbBits */ + assert(huffNode[n].nbBits <= targetNbBits); + /* n end at index of smallest symbol using < targetNbBits */ + while (huffNode[n].nbBits == targetNbBits) --n; - /* renorm totalCost from 2^largestBits to 2^maxNbBits + /* renorm totalCost from 2^largestBits to 2^targetNbBits * note : totalCost is necessarily a multiple of baseCost */ - assert((totalCost & (baseCost - 1)) == 0); - totalCost >>= (largestBits - maxNbBits); + assert(((U32)totalCost & (baseCost - 1)) == 0); + totalCost >>= (largestBits - targetNbBits); assert(totalCost > 0); /* repay normalized cost */ @@ -342,19 +387,19 @@ /* Get pos of last (smallest = lowest cum. count) symbol per rank */ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); - { U32 currentNbBits = maxNbBits; + { U32 currentNbBits = targetNbBits; int pos; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; - currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ - rankLast[maxNbBits-currentNbBits] = (U32)pos; + currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ + rankLast[targetNbBits-currentNbBits] = (U32)pos; } } while (totalCost > 0) { /* Try to reduce the next power of 2 above totalCost because we * gain back half the rank. */ - U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; + U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 const highPos = rankLast[nBitsToDecrease]; U32 const lowPos = rankLast[nBitsToDecrease-1]; @@ -394,7 +439,7 @@ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; - if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ @@ -406,11 +451,11 @@ * TODO. */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ - /* special case : no rank 1 symbol (using maxNbBits-1); - * let's create one from largest rank 0 (using maxNbBits). + /* special case : no rank 1 symbol (using targetNbBits-1); + * let's create one from largest rank 0 (using targetNbBits). */ if (rankLast[1] == noSymbol) { - while (huffNode[n].nbBits == maxNbBits) n--; + while (huffNode[n].nbBits == targetNbBits) n--; huffNode[n+1].nbBits--; assert(n >= 0); rankLast[1] = (U32)(n+1); @@ -424,7 +469,7 @@ } /* repay normalized cost */ } /* there are several too large elements (at least >= 2) */ - return maxNbBits; + return targetNbBits; } typedef struct { @@ -432,7 +477,7 @@ U16 curr; } rankPos; -typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; +typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; /* Number of buckets available for HUF_sort() */ #define RANK_POSITION_TABLE_SIZE 192 @@ -451,8 +496,8 @@ * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. 
*/ #define RANK_POSITION_MAX_COUNT_LOG 32 -#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */ -#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */ +#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) +#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) /* Return the appropriate bucket index for a given count. See definition of * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. @@ -460,7 +505,7 @@ static U32 HUF_getIndex(U32 const count) { return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) ? count - : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; + : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; } /* Helper swap function for HUF_quickSortPartition() */ @@ -583,7 +628,7 @@ /* Sort each bucket. */ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { - U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base; + int const bucketSize = rankPosition[n].curr - rankPosition[n].base; U32 const bucketStartIdx = rankPosition[n].base; if (bucketSize > 1) { assert(bucketStartIdx < maxSymbolValue1); @@ -594,6 +639,7 @@ assert(HUF_isSorted(huffNode, maxSymbolValue1)); } + /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). @@ -614,6 +660,7 @@ int lowS, lowN; int nodeNb = STARTNODE; int n, nodeRoot; + DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1); /* init for parents */ nonNullRank = (int)maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; @@ -640,6 +687,8 @@ for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1)); + return nonNullRank; } @@ -677,28 +726,36 @@ CTable[0] = maxNbBits; } -size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +size_t +HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, + void* workSpace, size_t wkspSize) { - HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); + HUF_buildCTable_wksp_tables* const wksp_tables = + (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; + HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); + + DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1); + /* safety checks */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) - return ERROR(workSpace_tooSmall); + return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) - return ERROR(maxSymbolValue_tooLarge); + return ERROR(maxSymbolValue_tooLarge); ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, 
maxSymbolValue, wksp_tables->rankPosition); + DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1)); /* build tree */ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); - /* enforce maxTableLog */ + /* determine and enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ @@ -807,7 +864,7 @@ #if DEBUGLEVEL >= 1 { size_t const nbBits = HUF_getNbBits(elt); - size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1; + size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; (void)dirtyBits; /* Middle bits are 0. */ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); @@ -887,7 +944,7 @@ { size_t const nbBits = bitC->bitPos[0] & 0xFF; if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ - return (bitC->ptr - bitC->startPtr) + (nbBits > 0); + return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); } } @@ -1048,9 +1105,9 @@ static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - if (bmi2) { + if (flags & HUF_flags_bmi2) { return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); } return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); @@ -1061,28 +1118,23 @@ static size_t HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, const int bmi2) + const HUF_CElt* CTable, const int flags) { - (void)bmi2; + (void)flags; return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); } #endif -size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) -{ - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, const void* src, size_t srcSize, - const HUF_CElt* CTable, int bmi2) + const HUF_CElt* CTable, int flags) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; @@ -1096,7 +1148,7 @@ op += 6; /* jumpTable */ assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; @@ -1104,7 +1156,7 @@ ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; @@ -1112,7 
+1164,7 @@ ip += segmentSize; assert(op <= oend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; @@ -1121,7 +1173,7 @@ ip += segmentSize; assert(op <= oend); assert(ip <= iend); - { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); if (cSize == 0 || cSize > 65535) return 0; op += cSize; } @@ -1129,14 +1181,9 @@ return (size_t)(op-ostart); } -size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) -{ - return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); -} - -size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -1144,11 +1191,11 @@ static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) + HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) { size_t const cSize = (nbStreams==HUF_singleStream) ? 
- HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : - HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); + HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : + HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; @@ -1171,6 +1218,79 @@ #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ +unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) +{ + unsigned cardinality = 0; + unsigned i; + + for (i = 0; i < maxSymbolValue + 1; i++) { + if (count[i] != 0) cardinality += 1; + } + + return cardinality; +} + +unsigned HUF_minTableLog(unsigned symbolCardinality) +{ + U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; + return minBitsSymbols; +} + +unsigned HUF_optimalTableLog( + unsigned maxTableLog, + size_t srcSize, + unsigned maxSymbolValue, + void* workSpace, size_t wkspSize, + HUF_CElt* table, + const unsigned* count, + int flags) +{ + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); + + if (!(flags & HUF_flags_optimalDepth)) { + /* cheap evaluation, based on FSE */ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); + } + + { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); + size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); + size_t maxBits, hSize, newSize; + const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); + const unsigned minTableLog = HUF_minTableLog(symbolCardinality); + size_t optSize = ((size_t) ~0) - 1; + unsigned optLog = maxTableLog, optLogGuess; + + DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); + + /* Search until size increases */ + for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { + DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); + maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); + if (ERR_isError(maxBits)) continue; + + if (maxBits < optLogGuess && optLogGuess > minTableLog) break; + + hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); + + if (ERR_isError(hSize)) continue; + + newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; + + if (newSize > optSize + 1) { + break; + } + + if (newSize < optSize) { + optSize = newSize; + optLog = optLogGuess; + } + } + assert(optLog <= HUF_TABLELOG_MAX); + return optLog; + } +} + /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ @@ -1180,14 +1300,14 @@ unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2, unsigned suspectUncompressible) + HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; + DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize); HUF_STATIC_ASSERT(sizeof(*table) + 
HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); /* checks & inits */ @@ -1201,16 +1321,17 @@ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; /* Heuristic : If old table is valid, use it for small inputs */ - if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* If uncompressible data is suspected, do a smaller sampling first */ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); - if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { size_t largestTotal = 0; + DEBUGLOG(5, "input suspected incompressible : sampling to check"); { unsigned maxSymbolValueBegin = maxSymbolValue; CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); largestTotal += largestBegin; @@ -1227,6 +1348,7 @@ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } + DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1)); /* Check validity of previous table */ if ( repeat @@ -1235,19 +1357,20 @@ *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ - if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } /* Build Huffman Tree */ - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, maxSymbolValue, huffLog, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; + DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); } /* Zero unused symbols in CTable, so we can check it for validity */ { @@ -1266,7 +1389,7 @@ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + nbStreams, oldHufTable, flags); } } /* Use the new huffman table */ @@ -1278,46 +1401,20 @@ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, table->CTable, bmi2); -} - - -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + nbStreams, table->CTable, flags); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int 
preferRepeat, - int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2, suspectUncompressible); -} - -/* HUF_compress4X_repeat(): - * compress input using 4 streams. - * provide workspace to generate compression tables */ -size_t HUF_compress4X_wksp (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - void* workSpace, size_t wkspSize) -{ - return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, - workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/, 0); + repeat, flags); } /* HUF_compress4X_repeat(): @@ -1328,43 +1425,11 @@ const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) + HUF_CElt* hufTable, HUF_repeat* repeat, int flags) { + DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); + hufTable, repeat, flags); } - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -/** HUF_buildCTable() : - * @return : maxNbBits - * Note : count is used before tree is written, so they can safely overlap - */ -size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) -{ - HUF_buildCTable_wksp_tables workspace; - return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace)); -} - -size_t HUF_compress1X (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress2 (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog) -{ - U64 workSpace[HUF_WORKSPACE_SIZE_U64]; - return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); -} - -size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) -{ - return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); -} -#endif diff --git a/sys/contrib/zstd/lib/compress/zstd_compress.c b/sys/contrib/zstd/lib/compress/zstd_compress.c --- a/sys/contrib/zstd/lib/compress/zstd_compress.c +++ b/sys/contrib/zstd/lib/compress/zstd_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,12 +11,12 @@ /*-************************************* * Dependencies ***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ #include "../common/mem.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_compress_internal.h" #include "zstd_compress_sequences.h" @@ -27,6 +27,7 @@ #include "zstd_opt.h" #include "zstd_ldm.h" #include "zstd_compress_superblock.h" +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */ /* *************************************************************** * Tuning parameters @@ -58,14 +59,17 @@ * Helper functions ***************************************/ /* ZSTD_compressBound() - * Note that the result from this function is only compatible with the "normal" - * full-block strategy. - * When there are a lot of small blocks due to frequent flush in streaming mode - * the overhead of headers can make the compressed data to be larger than the - * return value of ZSTD_compressBound(). + * Note that the result from this function is only valid for + * the one-pass compression functions. + * When employing the streaming mode, + * if flushes are frequently altering the size of blocks, + * the overhead from block headers can make the compressed data larger + * than the return value of ZSTD_compressBound(). */ size_t ZSTD_compressBound(size_t srcSize) { - return ZSTD_COMPRESSBOUND(srcSize); + size_t const r = ZSTD_COMPRESSBOUND(srcSize); + if (r==0) return ERROR(srcSize_wrong); + return r; } @@ -177,12 +181,9 @@ if (cctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(cctx->staticSize, memory_allocation, "not compatible with static CCtx"); - { - int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); ZSTD_freeCCtxContent(cctx); - if (!cctxInWorkspace) { - ZSTD_customFree(cctx, cctx->customMem); - } + if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem); } return 0; } @@ -267,9 +268,9 @@ return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); } -/* Returns 1 if compression parameters are such that we should +/* Returns ZSTD_ps_enable if compression parameters are such that we should * enable long distance matching (wlog >= 27, strategy >= btopt). - * Returns 0 otherwise. + * Returns ZSTD_ps_disable otherwise. */ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, const ZSTD_compressionParameters* const cParams) { @@ -277,6 +278,34 @@ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; } +static int ZSTD_resolveExternalSequenceValidation(int mode) { + return mode; +} + +/* Resolves maxBlockSize to the default if no value is present. 
*/ +static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) { + if (maxBlockSize == 0) { + return ZSTD_BLOCKSIZE_MAX; + } else { + return maxBlockSize; + } +} + +static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) { + if (value != ZSTD_ps_auto) return value; + if (cLevel < 10) { + return ZSTD_ps_disable; + } else { + return ZSTD_ps_enable; + } +} + +/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged. + * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */ +static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) { + return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast; +} + static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { @@ -294,6 +323,10 @@ } cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); + cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes, + cctxParams.compressionLevel); assert(!ZSTD_checkCParams(cParams)); return cctxParams; } @@ -339,10 +372,13 @@ #define ZSTD_NO_CLEVEL 0 /** - * Initializes the cctxParams from params and compressionLevel. + * Initializes `cctxParams` from `params` and `compressionLevel`. * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL. */ -static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel) +static void +ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, + const ZSTD_parameters* params, + int compressionLevel) { assert(!ZSTD_checkCParams(params->cParams)); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); @@ -355,6 +391,9 @@ cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); } @@ -369,7 +408,7 @@ /** * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. - * @param param Validated zstd parameters. + * @param params Validated zstd parameters. 
*/ static void ZSTD_CCtxParams_setZstdParams( ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) @@ -478,8 +517,8 @@ return bounds; case ZSTD_c_enableLongDistanceMatching: - bounds.lowerBound = 0; - bounds.upperBound = 1; + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_ldmHashLog: @@ -572,6 +611,26 @@ bounds.upperBound = 1; return bounds; + case ZSTD_c_prefetchCDictTables: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + + case ZSTD_c_enableSeqProducerFallback: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_maxBlockSize: + bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_searchForExternalRepcodes: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + default: bounds.error = ERROR(parameter_unsupported); return bounds; @@ -636,6 +695,10 @@ case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_searchForExternalRepcodes: default: return 0; } @@ -648,7 +711,7 @@ if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { - RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); + RETURN_ERROR(stage_wrong, "can only set params in cctx init stage"); } } switch(param) @@ -691,6 +754,10 @@ case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: + case ZSTD_c_searchForExternalRepcodes: break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); @@ -746,12 +813,12 @@ case ZSTD_c_minMatch : if (value!=0) /* 0 => use default */ BOUNDCHECK(ZSTD_c_minMatch, value); - CCtxParams->cParams.minMatch = value; + CCtxParams->cParams.minMatch = (U32)value; return CCtxParams->cParams.minMatch; case ZSTD_c_targetLength : BOUNDCHECK(ZSTD_c_targetLength, value); - CCtxParams->cParams.targetLength = value; + CCtxParams->cParams.targetLength = (U32)value; return CCtxParams->cParams.targetLength; case ZSTD_c_strategy : @@ -764,12 +831,12 @@ /* Content size written in frame header _when known_ (default:1) */ DEBUGLOG(4, "set content size flag = %u", (value!=0)); CCtxParams->fParams.contentSizeFlag = value != 0; - return CCtxParams->fParams.contentSizeFlag; + return (size_t)CCtxParams->fParams.contentSizeFlag; case ZSTD_c_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ CCtxParams->fParams.checksumFlag = value != 0; - return CCtxParams->fParams.checksumFlag; + return (size_t)CCtxParams->fParams.checksumFlag; case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); @@ -778,18 +845,18 @@ case ZSTD_c_forceMaxWindow : CCtxParams->forceWindow = (value != 0); - return CCtxParams->forceWindow; + return (size_t)CCtxParams->forceWindow; case ZSTD_c_forceAttachDict : { const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; - BOUNDCHECK(ZSTD_c_forceAttachDict, pref); + BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref); CCtxParams->attachDictPref = pref; return CCtxParams->attachDictPref; } case ZSTD_c_literalCompressionMode : { const ZSTD_paramSwitch_e 
lcm = (ZSTD_paramSwitch_e)value; - BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); + BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; } @@ -840,47 +907,48 @@ case ZSTD_c_enableDedicatedDictSearch : CCtxParams->enableDedicatedDictSearch = (value!=0); - return CCtxParams->enableDedicatedDictSearch; + return (size_t)CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : + BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : if (value!=0) /* 0 ==> auto */ BOUNDCHECK(ZSTD_c_ldmHashLog, value); - CCtxParams->ldmParams.hashLog = value; + CCtxParams->ldmParams.hashLog = (U32)value; return CCtxParams->ldmParams.hashLog; case ZSTD_c_ldmMinMatch : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmMinMatch, value); - CCtxParams->ldmParams.minMatchLength = value; + CCtxParams->ldmParams.minMatchLength = (U32)value; return CCtxParams->ldmParams.minMatchLength; case ZSTD_c_ldmBucketSizeLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); - CCtxParams->ldmParams.bucketSizeLog = value; + CCtxParams->ldmParams.bucketSizeLog = (U32)value; return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); - CCtxParams->ldmParams.hashRateLog = value; + CCtxParams->ldmParams.hashRateLog = (U32)value; return CCtxParams->ldmParams.hashRateLog; case ZSTD_c_targetCBlockSize : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_targetCBlockSize, value); - CCtxParams->targetCBlockSize = value; + CCtxParams->targetCBlockSize = (U32)value; return CCtxParams->targetCBlockSize; case ZSTD_c_srcSizeHint : if (value!=0) /* 0 ==> default */ BOUNDCHECK(ZSTD_c_srcSizeHint, value); CCtxParams->srcSizeHint = value; - return CCtxParams->srcSizeHint; + return (size_t)CCtxParams->srcSizeHint; case ZSTD_c_stableInBuffer: BOUNDCHECK(ZSTD_c_stableInBuffer, value); @@ -917,6 +985,27 @@ CCtxParams->deterministicRefPrefix = !!value; return CCtxParams->deterministicRefPrefix; + case ZSTD_c_prefetchCDictTables: + BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); + CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; + return CCtxParams->prefetchCDictTables; + + case ZSTD_c_enableSeqProducerFallback: + BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value); + CCtxParams->enableMatchFinderFallback = value; + return CCtxParams->enableMatchFinderFallback; + + case ZSTD_c_maxBlockSize: + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_maxBlockSize, value); + CCtxParams->maxBlockSize = value; + return CCtxParams->maxBlockSize; + + case ZSTD_c_searchForExternalRepcodes: + BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value); + CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; + return CCtxParams->searchForExternalRepcodes; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } } @@ -1049,6 +1138,18 @@ case ZSTD_c_deterministicRefPrefix: *value = (int)CCtxParams->deterministicRefPrefix; break; + case ZSTD_c_prefetchCDictTables: + *value = (int)CCtxParams->prefetchCDictTables; + break; + case ZSTD_c_enableSeqProducerFallback: + *value = CCtxParams->enableMatchFinderFallback; + break; + case ZSTD_c_maxBlockSize: + *value = (int)CCtxParams->maxBlockSize; + break; + case ZSTD_c_searchForExternalRepcodes: + *value = (int)CCtxParams->searchForExternalRepcodes; + 
break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; @@ -1075,9 +1176,47 @@ return 0; } +size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams) +{ + ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setCParams"); + /* only update if all parameters are valid */ + FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, cparams.windowLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, cparams.chainLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, cparams.hashLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, cparams.searchLog), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, cparams.minMatch), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, cparams.targetLength), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, cparams.strategy), ""); + return 0; +} + +size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams) +{ + ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */); + DEBUGLOG(4, "ZSTD_CCtx_setFParams"); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), ""); + FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), ""); + return 0; +} + +size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParams"); + /* First check cParams, because we want to update all or none. */ + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + /* Next set fParams, because this could fail if the cctx isn't in init stage. */ + FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), ""); + /* Finally set cParams, which should succeed. */ + FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), ""); + return 0; +} + size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { - DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; @@ -1093,9 +1232,9 @@ ZSTD_compressionParameters* cParams); /** - * Initializes the local dict using the requested parameters. - * NOTE: This does not use the pledged src size, because it may be used for more - * than one compression. + * Initializes the local dictionary using requested parameters. + * NOTE: Initialization does not employ the pledged src size, + * because the dictionary may be used for multiple compressions. */ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { @@ -1108,8 +1247,8 @@ return 0; } if (dl->cdict != NULL) { - assert(cctx->cdict == dl->cdict); /* Local dictionary already initialized. 
*/ + assert(cctx->cdict == dl->cdict); return 0; } assert(dl->dictSize > 0); @@ -1129,26 +1268,30 @@ } size_t ZSTD_CCtx_loadDictionary_advanced( - ZSTD_CCtx* cctx, const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) + ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) { - RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't load a dictionary when ctx is not in init stage."); DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); - ZSTD_clearAllDicts(cctx); /* in case one already exists */ - if (dict == NULL || dictSize == 0) /* no dictionary mode */ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't load a dictionary when cctx is not in init stage."); + ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */ + if (dict == NULL || dictSize == 0) /* no dictionary */ return 0; if (dictLoadMethod == ZSTD_dlm_byRef) { cctx->localDict.dict = dict; } else { + /* copy dictionary content inside CCtx to own its lifetime */ void* dictBuffer; RETURN_ERROR_IF(cctx->staticSize, memory_allocation, - "no malloc for static CCtx"); + "static CCtx can't allocate for an internal copy of dictionary"); dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem); - RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); + RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation, + "allocation failed for dictionary content"); ZSTD_memcpy(dictBuffer, dict, dictSize); - cctx->localDict.dictBuffer = dictBuffer; - cctx->localDict.dict = dictBuffer; + cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */ + cctx->localDict.dict = dictBuffer; /* read-only reference */ } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; @@ -1218,8 +1361,9 @@ if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, - "Can't reset parameters only when not in init stage."); + "Reset parameters is only possible during init stage."); ZSTD_clearAllDicts(cctx); + ZSTD_memset(&cctx->externalMatchCtx, 0, sizeof(cctx->externalMatchCtx)); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } return 0; @@ -1316,7 +1460,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, - ZSTD_cParamMode_e mode) + ZSTD_cParamMode_e mode, + ZSTD_paramSwitch_e useRowMatchFinder) { const U64 minSrcSize = 513; /* (1<<9) + 1 */ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); @@ -1350,8 +1495,8 @@ } /* resize windowLog if input is small enough, to use less memory */ - if ( (srcSize < maxWindowResize) - && (dictSize < maxWindowResize) ) { + if ( (srcSize <= maxWindowResize) + && (dictSize <= maxWindowResize) ) { U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? 
ZSTD_HASHLOG_MIN : @@ -1369,6 +1514,42 @@ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ + /* We can't use more than 32 bits of hash in total, so that means that we require: + * (hashLog + 8) <= 32 && (chainLog + 8) <= 32 + */ + if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) { + U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS; + if (cPar.hashLog > maxShortCacheHashLog) { + cPar.hashLog = maxShortCacheHashLog; + } + if (cPar.chainLog > maxShortCacheHashLog) { + cPar.chainLog = maxShortCacheHashLog; + } + } + + + /* At this point, we aren't 100% sure if we are using the row match finder. + * Unless it is explicitly disabled, conservatively assume that it is enabled. + * In this case it will only be disabled for small sources, so shrinking the + * hash log a little bit shouldn't result in any ratio loss. + */ + if (useRowMatchFinder == ZSTD_ps_auto) + useRowMatchFinder = ZSTD_ps_enable; + + /* We can't hash more than 32-bits in total. So that means that we require: + * (hashLog - rowLog + 8) <= 32 + */ + if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) { + /* Switch to 32-entry rows if searchLog is 5 (or more) */ + U32 const rowLog = BOUNDED(4, cPar.searchLog, 6); + U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS; + U32 const maxHashLog = maxRowHashLog + rowLog; + assert(cPar.hashLog >= rowLog); + if (cPar.hashLog > maxHashLog) { + cPar.hashLog = maxHashLog; + } + } + return cPar; } @@ -1379,7 +1560,7 @@ { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; - return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown); + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); } static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); @@ -1410,7 +1591,7 @@ ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ - return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder); } static size_t @@ -1439,7 +1620,7 @@ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)) + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder) - ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16)) + ? ZSTD_cwksp_aligned_alloc_size(hSize) : 0; size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt)) ? optPotentialSpace @@ -1455,6 +1636,13 @@ return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; } +/* Helper function for calculating memory requirements. + * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */ +static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) { + U32 const divider = (minMatch==3 || useSequenceProducer) ? 
3 : 4; + return blockSize / divider; +} + static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, @@ -1462,12 +1650,13 @@ const ZSTD_paramSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, - const U64 pledgedSrcSize) + const U64 pledgedSrcSize, + int useSequenceProducer, + size_t maxBlockSize) { size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (cParams->minMatch==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); @@ -1486,6 +1675,11 @@ size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + size_t const externalSeqSpace = useSequenceProducer + ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) + : 0; + size_t const neededSpace = cctxSpace + entropySpace + @@ -1494,7 +1688,8 @@ ldmSeqSpace + matchStateSize + tokenSpace + - bufferSpace; + bufferSpace + + externalSeqSpace; DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; @@ -1512,7 +1707,7 @@ * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. */ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( - &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN); + &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize); } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) @@ -1562,7 +1757,7 @@ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog); size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) ? 
((size_t)1 << cParams.windowLog) + blockSize : 0; @@ -1573,7 +1768,7 @@ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, - ZSTD_CONTENTSIZE_UNKNOWN); + ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize); } } @@ -1716,6 +1911,19 @@ ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; +/* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */ +static U64 ZSTD_bitmix(U64 val, U64 len) { + val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24); + val *= 0x9FB21C651E98DF25ULL; + val ^= (val >> 35) + len ; + val *= 0x9FB21C651E98DF25ULL; + return val ^ (val >> 28); +} + +/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ +static void ZSTD_advanceHashSalt(ZSTD_matchState_t* ms) { + ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4); +} static size_t ZSTD_reset_matchState(ZSTD_matchState_t* ms, @@ -1743,6 +1951,7 @@ } ms->hashLog3 = hashLog3; + ms->lazySkipping = 0; ZSTD_invalidateMatchState(ms); @@ -1764,6 +1973,27 @@ ZSTD_cwksp_clean_tables(ws); } + if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) { + /* Row match finder needs an additional table of hashes ("tags") */ + size_t const tagTableSize = hSize; + /* We want to generate a new salt in case we reset a Cctx, but we always want to use + * 0 when we reset a Cdict */ + if(forWho == ZSTD_resetTarget_CCtx) { + ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize); + ZSTD_advanceHashSalt(ms); + } else { + /* When we are not salting we want to always memset the memory */ + ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize); + ZSTD_memset(ms->tagTable, 0, tagTableSize); + ms->hashSalt = 0; + } + { /* Switch to 32-entry rows if searchLog is 5 (or more) */ + U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); + assert(cParams->hashLog >= rowLog); + ms->rowHashLog = cParams->hashLog - rowLog; + } + } + /* opt parser space */ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { DEBUGLOG(4, "reserving optimal parser space"); @@ -1775,19 +2005,6 @@ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); } - if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) { - { /* Row match finder needs an additional table of hashes ("tags") */ - size_t const tagTableSize = hSize*sizeof(U16); - ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize); - if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize); - } - { /* Switch to 32-entry rows if searchLog is 5 (or more) */ - U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); - assert(cParams->hashLog >= rowLog); - ms->rowHashLog = cParams->hashLog - rowLog; - } - } - ms->cParams = *cParams; RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, @@ -1847,6 +2064,7 @@ assert(params->useRowMatchFinder != ZSTD_ps_auto); assert(params->useBlockSplitter != ZSTD_ps_auto); assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + assert(params->maxBlockSize != 0); if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); @@ -1855,9 +2073,8 @@ } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); - U32 const divider = (params->cParams.minMatch==3) ? 
3 : 4; - size_t const maxNbSeq = blockSize / divider; + size_t const blockSize = MIN(params->maxBlockSize, windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params->useSequenceProducer); size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; @@ -1874,7 +2091,7 @@ size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( ¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, - buffInSize, buffOutSize, pledgedSrcSize); + buffInSize, buffOutSize, pledgedSrcSize, params->useSequenceProducer, params->maxBlockSize); int resizeWorkspace; FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); @@ -1917,6 +2134,7 @@ /* init params */ zc->blockState.matchState.cParams = params->cParams; + zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; @@ -1933,13 +2151,46 @@ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + ¶ms->cParams, + params->useRowMatchFinder, + crp, + needsIndexReset, + ZSTD_resetTarget_CCtx), ""); + + zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); + + /* ldm hash table */ + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { + /* TODO: avoid memset? */ + size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); + ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); + zc->maxNbLdmSequences = maxNbLdmSeq; + + ZSTD_window_init(&zc->ldmState.window); + zc->ldmState.loadedDictEnd = 0; + } + + /* reserve space for block-level external sequences */ + if (params->useSequenceProducer) { + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq; + zc->externalMatchCtx.seqBuffer = + (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); + } + + /* buffers */ + /* ZSTD_wildcopy() is used to copy into the literals buffer, * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. */ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); zc->seqStore.maxNbLit = blockSize; - /* buffers */ zc->bufferedPolicy = zbuff; zc->inBuffSize = buffInSize; zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); @@ -1962,32 +2213,9 @@ zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); - zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); - - FORWARD_IF_ERROR(ZSTD_reset_matchState( - &zc->blockState.matchState, - ws, - ¶ms->cParams, - params->useRowMatchFinder, - crp, - needsIndexReset, - ZSTD_resetTarget_CCtx), ""); - - /* ldm hash table */ - if (params->ldmParams.enableLdm == ZSTD_ps_enable) { - /* TODO: avoid memset? 
*/ - size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; - zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); - ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); - zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); - zc->maxNbLdmSequences = maxNbLdmSeq; - - ZSTD_window_init(&zc->ldmState.window); - zc->ldmState.loadedDictEnd = 0; - } DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace)); zc->initialized = 1; @@ -2059,7 +2287,8 @@ } params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, - cdict->dictContentSize, ZSTD_cpm_attachDict); + cdict->dictContentSize, ZSTD_cpm_attachDict, + params.useRowMatchFinder); params.cParams.windowLog = windowLog; params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize, @@ -2098,6 +2327,22 @@ return 0; } +static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize, + ZSTD_compressionParameters const* cParams) { + if (ZSTD_CDictIndicesAreTagged(cParams)){ + /* Remove tags from the CDict table if they are present. + * See docs on "short cache" in zstd_compress_internal.h for context. */ + size_t i; + for (i = 0; i < tableSize; i++) { + U32 const taggedIndex = src[i]; + U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS; + dst[i] = index; + } + } else { + ZSTD_memcpy(dst, src, tableSize * sizeof(U32)); + } +} + static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, @@ -2133,21 +2378,23 @@ : 0; size_t const hSize = (size_t)1 << cdict_cParams->hashLog; - ZSTD_memcpy(cctx->blockState.matchState.hashTable, - cdict->matchState.hashTable, - hSize * sizeof(U32)); + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable, + cdict->matchState.hashTable, + hSize, cdict_cParams); + /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) { - ZSTD_memcpy(cctx->blockState.matchState.chainTable, - cdict->matchState.chainTable, - chainSize * sizeof(U32)); + ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable, + cdict->matchState.chainTable, + chainSize, cdict_cParams); } /* copy tag table */ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) { - size_t const tagTableSize = hSize*sizeof(U16); + size_t const tagTableSize = hSize; ZSTD_memcpy(cctx->blockState.matchState.tagTable, - cdict->matchState.tagTable, - tagTableSize); + cdict->matchState.tagTable, + tagTableSize); + cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt; } } @@ -2226,6 +2473,7 @@ params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; + params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, /* loadedDictSize */ 0, ZSTDcrp_leaveDirty, zbuff); @@ -2385,7 +2633,7 @@ /* See doc/zstd_compression_format.md for detailed format description */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +int ZSTD_seqToCodes(const 
seqStore_t* seqStorePtr)
 {
     const seqDef* const sequences = seqStorePtr->sequencesStart;
     BYTE* const llCodeTable = seqStorePtr->llCode;
@@ -2393,18 +2641,24 @@
     BYTE* const mlCodeTable = seqStorePtr->mlCode;
     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     U32 u;
+    int longOffsets = 0;
     assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
+        U32 const ofCode = ZSTD_highbit32(sequences[u].offBase);
         U32 const mlv = sequences[u].mlBase;
         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
-        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
+        ofCodeTable[u] = (BYTE)ofCode;
         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+        assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN));
+        if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN)
+            longOffsets = 1;
     }
     if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
     if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+    return longOffsets;
 }

 /* ZSTD_useTargetCBlockSize():
@@ -2438,6 +2692,7 @@
     U32 MLtype;
     size_t size;
     size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+    int longOffsets;
 } ZSTD_symbolEncodingTypeStats_t;

 /* ZSTD_buildSequencesStatistics():
@@ -2448,11 +2703,13 @@
  *  entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
  */
 static ZSTD_symbolEncodingTypeStats_t
-ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
-        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
-        BYTE* dst, const BYTE* const dstEnd,
-        ZSTD_strategy strategy, unsigned* countWorkspace,
-        void* entropyWorkspace, size_t entropyWkspSize) {
+ZSTD_buildSequencesStatistics(
+        const seqStore_t* seqStorePtr, size_t nbSeq,
+        const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+        BYTE* dst, const BYTE* const dstEnd,
+        ZSTD_strategy strategy, unsigned* countWorkspace,
+        void* entropyWorkspace, size_t entropyWkspSize)
+{
     BYTE* const ostart = dst;
     const BYTE* const oend = dstEnd;
     BYTE* op = ostart;
@@ -2466,7 +2723,7 @@
     stats.lastCountSize = 0;
     /* convert length/distances into codes */
-    ZSTD_seqToCodes(seqStorePtr);
+    stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
     assert(op <= oend);
     assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
     /* build CTable for Literal Lengths */
@@ -2571,22 +2828,22 @@
  */
 #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
 MEM_STATIC size_t
-ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
-                          const ZSTD_entropyCTables_t* prevEntropy,
-                                ZSTD_entropyCTables_t* nextEntropy,
-                          const ZSTD_CCtx_params* cctxParams,
-                          void* dst, size_t dstCapacity,
-                          void* entropyWorkspace, size_t entropyWkspSize,
-                          const int bmi2)
+ZSTD_entropyCompressSeqStore_internal(
+                          const seqStore_t* seqStorePtr,
+                          const ZSTD_entropyCTables_t* prevEntropy,
+                                ZSTD_entropyCTables_t* nextEntropy,
+                          const ZSTD_CCtx_params* cctxParams,
+                          void* dst, size_t dstCapacity,
+                          void* entropyWorkspace, size_t entropyWkspSize,
+                          const int bmi2)
 {
-    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
     ZSTD_strategy const strategy = cctxParams->cParams.strategy;
     unsigned* count = (unsigned*)entropyWorkspace;
     FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
     FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
     FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
     const seqDef* const sequences = seqStorePtr->sequencesStart;
-    const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+    const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
     const BYTE* const llCodeTable = seqStorePtr->llCode;
     const BYTE* const mlCodeTable = seqStorePtr->mlCode;
@@ -2594,29 +2851,31 @@
     BYTE* const oend = ostart + dstCapacity;
     BYTE* op = ostart;
     size_t lastCountSize;
+    int longOffsets = 0;
     entropyWorkspace = count + (MaxSeq + 1);
     entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
-    DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
+    DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity);
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
     assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
     /* Compress literals */
     {   const BYTE* const literals = seqStorePtr->litStart;
-        size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
-        size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
+        size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+        size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
         /* Base suspicion of uncompressibility on ratio of literals to sequences */
         unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
         size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+
         size_t const cSize = ZSTD_compressLiterals(
-                                    &prevEntropy->huf, &nextEntropy->huf,
-                                    cctxParams->cParams.strategy,
-                                    ZSTD_literalsCompressionIsDisabled(cctxParams),
                                     op, dstCapacity,
                                     literals, litSize,
                                     entropyWorkspace, entropyWkspSize,
-                                    bmi2, suspectUncompressible);
+                                    &prevEntropy->huf, &nextEntropy->huf,
+                                    cctxParams->cParams.strategy,
+                                    ZSTD_literalsCompressionIsDisabled(cctxParams),
+                                    suspectUncompressible, bmi2);
         FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
         assert(cSize <= dstCapacity);
         op += cSize;
@@ -2642,11 +2901,10 @@
         ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
         return (size_t)(op - ostart);
     }
-    {
-        ZSTD_symbolEncodingTypeStats_t stats;
-        BYTE* seqHead = op++;
+    {   BYTE* const seqHead = op++;
         /* build stats for sequences */
-        stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+        const ZSTD_symbolEncodingTypeStats_t stats =
+              ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
                                          &prevEntropy->fse, &nextEntropy->fse,
                                           op, oend,
                                           strategy, count,
@@ -2655,6 +2913,7 @@
         *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
         lastCountSize = stats.lastCountSize;
         op += stats.size;
+        longOffsets = stats.longOffsets;
     }

     {   size_t const bitstreamSize = ZSTD_encodeSequences(
@@ -2689,14 +2948,15 @@
 }

 MEM_STATIC size_t
-ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
-                       const ZSTD_entropyCTables_t* prevEntropy,
-                             ZSTD_entropyCTables_t* nextEntropy,
-                       const ZSTD_CCtx_params* cctxParams,
-                             void* dst, size_t dstCapacity,
-                             size_t srcSize,
-                             void* entropyWorkspace, size_t entropyWkspSize,
-                             int bmi2)
+ZSTD_entropyCompressSeqStore(
+                       const seqStore_t* seqStorePtr,
+                       const ZSTD_entropyCTables_t* prevEntropy,
+                             ZSTD_entropyCTables_t* nextEntropy,
+                       const ZSTD_CCtx_params* cctxParams,
+                             void* dst, size_t dstCapacity,
+                             size_t srcSize,
+                             void* entropyWorkspace, size_t entropyWkspSize,
+                             int bmi2)
 {
     size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
                             seqStorePtr, prevEntropy, nextEntropy, cctxParams,
@@ -2706,15 +2966,21 @@
     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
      * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
*/ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) + if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) { + DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); return 0; /* block not compressed */ + } FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } - DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); + /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly. + * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above. + */ + assert(cSize < ZSTD_BLOCKSIZE_MAX); return cSize; } @@ -2809,6 +3075,72 @@ ssPtr->longLengthType = ZSTD_llt_none; } +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. + * - Appends a block delimiter to outSeqs if one is not already present. + * See zstd.h for context regarding block delimiters. + * Returns the number of sequences after post-processing, or an error code. */ +static size_t ZSTD_postProcessSequenceProducerResult( + ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize +) { + RETURN_ERROR_IF( + nbExternalSeqs > outSeqsCapacity, + sequenceProducer_failed, + "External sequence producer returned error code %lu", + (unsigned long)nbExternalSeqs + ); + + RETURN_ERROR_IF( + nbExternalSeqs == 0 && srcSize > 0, + sequenceProducer_failed, + "Got zero sequences from external sequence producer for a non-empty src buffer!" + ); + + if (srcSize == 0) { + ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence)); + return 1; + } + + { + ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1]; + + /* We can return early if lastSeq is already a block delimiter. */ + if (lastSeq.offset == 0 && lastSeq.matchLength == 0) { + return nbExternalSeqs; + } + + /* This error condition is only possible if the external matchfinder + * produced an invalid parse, by definition of ZSTD_sequenceBound(). */ + RETURN_ERROR_IF( + nbExternalSeqs == outSeqsCapacity, + sequenceProducer_failed, + "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!" + ); + + /* lastSeq is not a block delimiter, so we need to append one. */ + ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence)); + return nbExternalSeqs + 1; + } +} + +/* ZSTD_fastSequenceLengthSum() : + * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*. + * Similar to another function in zstd_compress.c (determine_blockSize), + * except it doesn't check for a block delimiter to end summation. + * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P). + * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. 
*/ +static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) { + size_t matchLenSum, litLenSum, i; + matchLenSum = 0; + litLenSum = 0; + for (i = 0; i < seqBufSize; i++) { + litLenSum += seqBuf[i].litLength; + matchLenSum += seqBuf[i].matchLength; + } + return litLenSum + matchLenSum; +} + typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) @@ -2818,7 +3150,9 @@ assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); - if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); } else { @@ -2854,6 +3188,15 @@ } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); + + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + zc->appliedParams.useSequenceProducer, + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." + ); + /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2865,6 +3208,14 @@ } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { rawSeqStore_t ldmSeqStore = kNullRawSeqStore; + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( + zc->appliedParams.useSequenceProducer, + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." 
+ ); + ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; /* Updates ldmSeqStore.size */ @@ -2879,7 +3230,68 @@ zc->appliedParams.useRowMatchFinder, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); - } else { /* not long range mode */ + } else if (zc->appliedParams.useSequenceProducer) { + assert( + zc->externalMatchCtx.seqBufferCapacity >= ZSTD_sequenceBound(srcSize) + ); + assert(zc->externalMatchCtx.mFinder != NULL); + + { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog; + + size_t const nbExternalSeqs = (zc->externalMatchCtx.mFinder)( + zc->externalMatchCtx.mState, + zc->externalMatchCtx.seqBuffer, + zc->externalMatchCtx.seqBufferCapacity, + src, srcSize, + NULL, 0, /* dict and dictSize, currently not supported */ + zc->appliedParams.compressionLevel, + windowSize + ); + + size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult( + zc->externalMatchCtx.seqBuffer, + nbExternalSeqs, + zc->externalMatchCtx.seqBufferCapacity, + srcSize + ); + + /* Return early if there is no error, since we don't need to worry about last literals */ + if (!ZSTD_isError(nbPostProcessedSeqs)) { + ZSTD_sequencePosition seqPos = {0,0,0}; + size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs); + RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); + FORWARD_IF_ERROR( + ZSTD_copySequencesToSeqStoreExplicitBlockDelim( + zc, &seqPos, + zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs, + src, srcSize, + zc->appliedParams.searchForExternalRepcodes + ), + "Failed to copy external sequences to seqStore!" + ); + ms->ldmSeqStore = NULL; + DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs); + return ZSTDbss_compress; + } + + /* Propagate the error if fallback is disabled */ + if (!zc->appliedParams.enableMatchFinderFallback) { + return nbPostProcessedSeqs; + } + + /* Fallback to software matchfinder */ + { ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); + ms->ldmSeqStore = NULL; + DEBUGLOG( + 5, + "External sequence producer returned error code %lu. 
Falling back to internal parser.", + (unsigned long)nbExternalSeqs + ); + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } } + } else { /* not long range mode and no external matchfinder */ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->appliedParams.useRowMatchFinder, dictMode); @@ -2940,7 +3352,7 @@ /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode so we provide seqStoreSeqs[i].offset - 1 */ ZSTD_updateRep(updatedRepcodes.rep, - seqStoreSeqs[i].offBase - 1, + seqStoreSeqs[i].offBase, seqStoreSeqs[i].litLength == 0); literalsRead += outSeqs[i].litLength; } @@ -2956,6 +3368,10 @@ zc->seqCollector.seqIndex += seqStoreSeqSize; } +size_t ZSTD_sequenceBound(size_t srcSize) { + return (srcSize / ZSTD_MINMATCH_MIN) + 1; +} + size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize) { @@ -3001,19 +3417,17 @@ const size_t unrollMask = unrollSize - 1; const size_t prefixLength = length & unrollMask; size_t i; - size_t u; if (length == 1) return 1; /* Check if prefix is RLE first before using unrolled loop */ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) { return 0; } for (i = prefixLength; i != length; i += unrollSize) { + size_t u; for (u = 0; u < unrollSize; u += sizeof(size_t)) { if (MEM_readST(ip + i + u) != valueST) { return 0; - } - } - } + } } } return 1; } @@ -3029,7 +3443,8 @@ return nbSeqs < 4 && nbLits < 10; } -static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) +static void +ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs) { ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; bs->prevCBlock = bs->nextCBlock; @@ -3037,7 +3452,9 @@ } /* Writes the block header */ -static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) { +static void +writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) +{ U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); @@ -3050,13 +3467,16 @@ * Stores literals block type (raw, rle, compressed, repeat) and * huffman description table to hufMetadata. 
* Requires ENTROPY_WORKSPACE_SIZE workspace - * @return : size of huffman description table or error code */ -static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, - const ZSTD_hufCTables_t* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_hufCTablesMetadata_t* hufMetadata, - const int literalsCompressionIsDisabled, - void* workspace, size_t wkspSize) + * @return : size of huffman description table, or an error code + */ +static size_t +ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + const int literalsCompressionIsDisabled, + void* workspace, size_t wkspSize, + int hufFlags) { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; @@ -3064,9 +3484,9 @@ unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); BYTE* const nodeWksp = countWkspStart + countWkspSize; - const size_t nodeWkspSize = wkspEnd-nodeWksp; + const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp); unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; - unsigned huffLog = HUF_TABLELOG_DEFAULT; + unsigned huffLog = LitHufLog; HUF_repeat repeat = prevHuf->repeatMode; DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); @@ -3081,73 +3501,77 @@ /* small ? don't even attempt compression (speed opt) */ #ifndef COMPRESS_LITERALS_SIZE_MIN -#define COMPRESS_LITERALS_SIZE_MIN 63 +# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */ #endif { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; if (srcSize <= minLitSize) { DEBUGLOG(5, "set_basic - too small"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Scan input and build symbol stats */ - { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); + { size_t const largest = + HIST_count_wksp (countWksp, &maxSymbolValue, + (const BYTE*)src, srcSize, + workspace, wkspSize); FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); if (largest == srcSize) { + /* only one literal symbol */ DEBUGLOG(5, "set_rle"); hufMetadata->hType = set_rle; return 0; } if (largest <= (srcSize >> 7)+4) { + /* heuristic: likely not compressible */ DEBUGLOG(5, "set_basic - no gain"); hufMetadata->hType = set_basic; return 0; - } - } + } } /* Validate the previous Huffman table */ - if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { + if (repeat == HUF_repeat_check + && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { repeat = HUF_repeat_none; } /* Build Huffman Tree */ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); - huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags); + assert(huffLog <= LitHufLog); { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue, huffLog, nodeWksp, nodeWkspSize); FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); huffLog = (U32)maxBits; - { /* Build and write the CTable */ - size_t const newCSize = HUF_estimateCompressedSize( - (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); - size_t const hSize = HUF_writeCTable_wksp( - hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), - 
(HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, - nodeWksp, nodeWkspSize); - /* Check against repeating the previous CTable */ - if (repeat != HUF_repeat_none) { - size_t const oldCSize = HUF_estimateCompressedSize( - (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); - if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { - DEBUGLOG(5, "set_repeat - smaller"); - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_repeat; - return 0; - } - } - if (newCSize + hSize >= srcSize) { - DEBUGLOG(5, "set_basic - no gains"); + } + { /* Build and write the CTable */ + size_t const newCSize = HUF_estimateCompressedSize( + (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); + size_t const hSize = HUF_writeCTable_wksp( + hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), + (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + /* Check against repeating the previous CTable */ + if (repeat != HUF_repeat_none) { + size_t const oldCSize = HUF_estimateCompressedSize( + (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { + DEBUGLOG(5, "set_repeat - smaller"); ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - hufMetadata->hType = set_basic; + hufMetadata->hType = set_repeat; return 0; - } - DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); - hufMetadata->hType = set_compressed; - nextHuf->repeatMode = HUF_repeat_check; - return hSize; + } } + if (newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; } + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; + return hSize; } } @@ -3157,8 +3581,9 @@ * and updates nextEntropy to the appropriate repeatMode. */ static ZSTD_symbolEncodingTypeStats_t -ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { - ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0}; +ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) +{ + ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0}; nextEntropy->litlength_repeatMode = FSE_repeat_none; nextEntropy->offcode_repeatMode = FSE_repeat_none; nextEntropy->matchlength_repeatMode = FSE_repeat_none; @@ -3169,16 +3594,18 @@ * Builds entropy for the sequences. * Stores symbol compression modes and fse table to fseMetadata. * Requires ENTROPY_WORKSPACE_SIZE wksp. 
- * @return : size of fse tables or error code */ -static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, - const ZSTD_fseCTables_t* prevEntropy, - ZSTD_fseCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize) + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( + const seqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize) { ZSTD_strategy const strategy = cctxParams->cParams.strategy; - size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; @@ -3205,23 +3632,28 @@ /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. * Requires workspace size ENTROPY_WORKSPACE_SIZE - * - * @return : 0 on success or error code + * @return : 0 on success, or an error code + * Note : also employed in superblock */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize) -{ - size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; +size_t ZSTD_buildBlockEntropyStats( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize) +{ + size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); + int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD); + int const hufFlags = huf_useOptDepth ? 
HUF_flags_optimalDepth : 0; + entropyMetadata->hufMetadata.hufDesSize = ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, ZSTD_literalsCompressionIsDisabled(cctxParams), - workspace, wkspSize); + workspace, wkspSize, hufFlags); + FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, @@ -3234,11 +3666,12 @@ } /* Returns the size estimate for the literals section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, - const ZSTD_hufCTables_t* huf, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; @@ -3260,12 +3693,13 @@ } /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ -static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, - const BYTE* codeTable, size_t nbSeq, unsigned maxCode, - const FSE_CTable* fseCTable, - const U8* additionalBits, - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) +static size_t +ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, + const BYTE* codeTable, size_t nbSeq, unsigned maxCode, + const FSE_CTable* fseCTable, + const U8* additionalBits, + short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + void* workspace, size_t wkspSize) { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; @@ -3297,99 +3731,107 @@ } /* Returns the size estimate for the sequences section (header + content) of a block */ -static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - void* workspace, size_t wkspSize, - int writeEntropy) +static size_t +ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) { size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ); size_t cSeqSizeEstimate = 0; cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff, - fseTables->offcodeCTable, NULL, - OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, - workspace, wkspSize); + fseTables->offcodeCTable, NULL, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL, - fseTables->litlengthCTable, LL_bits, - LL_defaultNorm, LL_defaultNormLog, MaxLL, - workspace, wkspSize); + fseTables->litlengthCTable, LL_bits, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + workspace, wkspSize); cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML, - 
fseTables->matchlengthCTable, ML_bits, - ML_defaultNorm, ML_defaultNormLog, MaxML, - workspace, wkspSize); + fseTables->matchlengthCTable, ML_bits, + ML_defaultNorm, ML_defaultNormLog, MaxML, + workspace, wkspSize); if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; return cSeqSizeEstimate + sequencesSectionHeaderSize; } /* Returns the size estimate for a given stream of literals, of, ll, ml */ -static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, - const BYTE* ofCodeTable, - const BYTE* llCodeTable, - const BYTE* mlCodeTable, - size_t nbSeq, - const ZSTD_entropyCTables_t* entropy, - const ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize, - int writeLitEntropy, int writeSeqEntropy) { +static size_t +ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, + const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize, + int writeLitEntropy, int writeSeqEntropy) +{ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize, - &entropy->huf, &entropyMetadata->hufMetadata, - workspace, wkspSize, writeLitEntropy); + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, - nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, - workspace, wkspSize, writeSeqEntropy); + nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, + workspace, wkspSize, writeSeqEntropy); return seqSize + literalsSize + ZSTD_blockHeaderSize; } /* Builds entropy statistics and uses them for blocksize estimation. * - * Returns the estimated compressed size of the seqStore, or a zstd error. + * @return: estimated compressed size of the seqStore, or a zstd error. 
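The estimators above sum per-symbol costs from the actual Huffman/FSE code lengths, then add the serialized table sizes and ZSTD_blockHeaderSize. As a rough stand-in for what such an estimate measures, an order-0 Shannon estimate over a byte histogram (illustrative only, not the zstd estimator):

#include <math.h>
#include <stddef.h>

/* Ideal order-0 cost, in bytes, of a symbol stream described by its histogram.
 * zstd uses the real code lengths instead and adds table/header costs on top. */
double estimateCompressedBytes(const size_t count[256], size_t total)
{
    double bits = 0.0;
    size_t s;
    if (total == 0) return 0.0;
    for (s = 0; s < 256; s++) {
        if (count[s] == 0) continue;
        {   double const p = (double)count[s] / (double)total;
            bits += (double)count[s] * -log2(p);   /* cost of all occurrences of symbol s */
        }
    }
    return bits / 8.0;
}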
*/ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; +static size_t +ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) +{ + ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); - return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), + zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), ""); + return ZSTD_estimateBlockSize( + seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, + &zc->blockState.nextCBlock->entropy, + entropyMetadata, + zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) +{ size_t literalsBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; + seqDef const seq = seqStore->sequencesStart[i]; literalsBytes += seq.litLength; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { literalsBytes += 0x10000; - } - } + } } return literalsBytes; } /* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { +static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) +{ size_t matchBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { seqDef seq = seqStore->sequencesStart[i]; matchBytes += seq.mlBase + MINMATCH; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { matchBytes += 0x10000; - } - } + } } return matchBytes; } @@ -3398,15 +3840,12 @@ */ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, const seqStore_t* originalSeqStore, - size_t startIdx, size_t endIdx) { - BYTE* const litEnd = originalSeqStore->lit; - size_t literalsBytes; - size_t literalsBytesPreceding = 0; - + size_t startIdx, size_t endIdx) +{ *resultSeqStore = *originalSeqStore; if (startIdx > 0) { resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx; - literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); } /* Move longLengthPos into the correct position if necessary */ @@ -3419,13 +3858,12 @@ } resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; resultSeqStore->sequences = 
originalSeqStore->sequencesStart + endIdx; - literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); - resultSeqStore->litStart += literalsBytesPreceding; if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) { /* This accounts for possible last literals if the derived chunk reaches the end of the block */ - resultSeqStore->lit = litEnd; + assert(resultSeqStore->lit == originalSeqStore->lit); } else { - resultSeqStore->lit = resultSeqStore->litStart+literalsBytes; + size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore); + resultSeqStore->lit = resultSeqStore->litStart + literalsBytes; } resultSeqStore->llCode += startIdx; resultSeqStore->mlCode += startIdx; @@ -3433,20 +3871,26 @@ } /** - * Returns the raw offset represented by the combination of offCode, ll0, and repcode history. - * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq(). + * Returns the raw offset represented by the combination of offBase, ll0, and repcode history. + * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq(). */ static U32 -ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) -{ - U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */ - assert(STORED_IS_REPCODE(offCode)); - if (adjustedOffCode == ZSTD_REP_NUM) { - /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */ - assert(rep[0] > 0); +ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0) +{ + U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */ + assert(OFFBASE_IS_REPCODE(offBase)); + if (adjustedRepCode == ZSTD_REP_NUM) { + assert(ll0); + /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 + * This is only valid if it results in a valid offset value, aka > 0. + * Note : it may happen that `rep[0]==1` in exceptional circumstances. + * In which case this function will return 0, which is an invalid offset. + * It's not an issue though, since this value will be + * compared and discarded within ZSTD_seqStore_resolveOffCodes(). + */ return rep[0] - 1; } - return rep[adjustedOffCode]; + return rep[adjustedRepCode]; } /** @@ -3462,30 +3906,33 @@ * 1-3 : repcode 1-3 * 4+ : real_offset+3 */ -static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, - seqStore_t* const seqStore, U32 const nbSeq) { +static void +ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, + const seqStore_t* const seqStore, U32 const nbSeq) +{ U32 idx = 0; + U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? 
seqStore->longLengthPos : nbSeq; for (; idx < nbSeq; ++idx) { seqDef* const seq = seqStore->sequencesStart + idx; - U32 const ll0 = (seq->litLength == 0); - U32 const offCode = OFFBASE_TO_STORED(seq->offBase); - assert(seq->offBase > 0); - if (STORED_IS_REPCODE(offCode)) { - U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0); - U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0); + U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); + U32 const offBase = seq->offBase; + assert(offBase > 0); + if (OFFBASE_IS_REPCODE(offBase)) { + U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0); + U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0); /* Adjust simulated decompression repcode history if we come across a mismatch. Replace * the repcode with the offset it actually references, determined by the compression * repcode history. */ if (dRawOffset != cRawOffset) { - seq->offBase = cRawOffset + ZSTD_REP_NUM; + seq->offBase = OFFSET_TO_OFFBASE(cRawOffset); } } /* Compression repcode history is always updated with values directly from the unmodified seqStore. * Decompression repcode history may use modified seq->offset value taken from compression repcode history. */ - ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0); - ZSTD_updateRep(cRepcodes->rep, offCode, ll0); + ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0); + ZSTD_updateRep(cRepcodes->rep, offBase, ll0); } } @@ -3495,10 +3942,11 @@ * Returns the total size of that block (including header) or a ZSTD error code. */ static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, + const seqStore_t* const seqStore, repcodes_t* const dRep, repcodes_t* const cRep, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, + const void* src, size_t srcSize, U32 lastBlock, U32 isPartition) { const U32 rleMaxLength = 25; @@ -3572,45 +4020,49 @@ /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. - * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then - * we do not recurse. + * If advantageous to split, then we recurse down the two sub-blocks. + * If not, or if an error occurred in estimation, then we do not recurse. * - * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING. + * Note: The recursion depth is capped by a heuristic minimum number of sequences, + * defined by MIN_SEQUENCES_BLOCK_SPLITTING. * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). * In practice, recursion depth usually doesn't go beyond 4. * - * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. + * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize * maximum of 128 KB, this value is actually impossible to reach. 
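The recursion just described can be restated over an abstract cost oracle: estimate a range of sequences as a whole, estimate both halves, and only record the midpoint (and recurse into each half) when the halves together are estimated cheaper than the whole. A sketch using stand-in names (deriveSplits, costFn, minSeqPerChunk, maxSplits), not the real zstd identifiers or values:

#include <stddef.h>

typedef size_t (*costFn)(size_t startIdx, size_t endIdx, void* ctx);

/* splits[] receives midpoints in left-to-right order; *nbSplits is the count so far. */
void deriveSplits(size_t startIdx, size_t endIdx,
                  costFn cost, void* ctx,
                  size_t* splits, size_t* nbSplits,
                  size_t maxSplits, size_t minSeqPerChunk)
{
    size_t const mid = (startIdx + endIdx) / 2;
    if (endIdx - startIdx < minSeqPerChunk || *nbSplits >= maxSplits) return;
    {   size_t const whole = cost(startIdx, endIdx, ctx);
        size_t const left  = cost(startIdx, mid, ctx);
        size_t const right = cost(mid, endIdx, ctx);
        if (left + right < whole) {
            deriveSplits(startIdx, mid, cost, ctx, splits, nbSplits, maxSplits, minSeqPerChunk);
            splits[(*nbSplits)++] = mid;
            deriveSplits(mid, endIdx, cost, ctx, splits, nbSplits, maxSplits, minSeqPerChunk);
        }
    }
}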
*/ static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, ZSTD_CCtx* zc, const seqStore_t* origSeqStore) { - seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; + seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; size_t midIdx = (startIdx + endIdx)/2; + DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + assert(endIdx >= startIdx); if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { - DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx); return; } - DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); - DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", + DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { return; } if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) { + DEBUGLOG(5, "split decided at seqNb:%zu", midIdx); ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); splits->splitLocations[splits->idx] = (U32)midIdx; splits->idx++; @@ -3618,14 +4070,18 @@ } } -/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio. +/* Base recursive function. + * Populates a table with intra-block partition indices that can improve compression ratio. * - * Returns the number of splits made (which equals the size of the partition table - 1). + * @return: number of splits made (which equals the size of the partition table - 1). */ -static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) { - seqStoreSplits splits = {partitions, 0}; +static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) +{ + seqStoreSplits splits; + splits.splitLocations = partitions; + splits.idx = 0; if (nbSeq <= 4) { - DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split"); + DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq); /* Refuse to try and split anything with less than 4 sequences */ return 0; } @@ -3641,18 +4097,20 @@ * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
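The partition table filled in by ZSTD_deriveBlockSplits() is then consumed as numSplits+1 consecutive sequence ranges: [0, partitions[0]), [partitions[0], partitions[1]), ..., and finally up to nbSeq. A small usage illustration with made-up values:

#include <stdio.h>

int main(void)
{
    unsigned const partitions[] = { 120, 260, 410 };   /* hypothetical split points */
    size_t const numSplits = sizeof(partitions) / sizeof(partitions[0]);
    unsigned const nbSeq = 500;                        /* hypothetical sequence count */
    unsigned start = 0;
    size_t i;
    for (i = 0; i <= numSplits; i++) {
        unsigned const end = (i < numSplits) ? partitions[i] : nbSeq;
        printf("chunk %zu: sequences [%u, %u)\n", i, start, end);
        start = end;
    }
    return 0;
}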
*/ static size_t -ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, - const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t blockSize, + U32 lastBlock, U32 nbSeq) { size_t cSize = 0; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; size_t i = 0; size_t srcBytesTotal = 0; - U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ - seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; - seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; - size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; + size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two @@ -3674,30 +4132,31 @@ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate); if (numSplits == 0) { - size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, - &dRep, &cRep, - op, dstCapacity, - ip, blockSize, - lastBlock, 0 /* isPartition */); + size_t cSizeSingleBlock = + ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore, + &dRep, &cRep, + op, dstCapacity, + ip, blockSize, + lastBlock, 0 /* isPartition */); FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); - assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX); + assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize); return cSizeSingleBlock; } ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); for (i = 0; i <= numSplits; ++i) { - size_t srcBytes; size_t cSizeChunk; U32 const lastPartition = (i == numSplits); U32 lastBlockEntireSrc = 0; - srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); + size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); srcBytesTotal += srcBytes; if (lastPartition) { /* This is the final partition, need to account for possible last literals */ @@ -3712,7 +4171,8 @@ op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); + DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size", + ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; @@ -3720,10 +4180,10 @@ dstCapacity -= cSizeChunk; cSize += cSizeChunk; *currSeqStore = *nextSeqStore; 
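Reconciling dRep and cRep above relies on turning a stored repcode back into a raw offset, which is what ZSTD_resolveRepcodeToRawOffset() does earlier. Restated over plain arrays as a sketch (not the zstd function itself):

/* repCode1to3 is the repcode value 1..3 carried by a sequence; ll0 is 1 when the
 * sequence has no literals. The shifted index 3 means "rep[0] - 1", which the
 * caller must treat as invalid when it evaluates to 0. */
unsigned repcodeToRawOffset(const unsigned rep[3], unsigned repCode1to3, unsigned ll0)
{
    unsigned const idx = repCode1to3 - 1 + ll0;   /* in [0..3] */
    if (idx == 3) return rep[0] - 1;              /* litLength==0 combined with repcode 3 */
    return rep[idx];
}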
- assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); + assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize); } - /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes - * for the next block. + /* cRep and dRep may have diverged during the compression. + * If so, we use the dRep repcodes for the next block. */ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); return cSize; @@ -3734,8 +4194,6 @@ void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { - const BYTE* ip = (const BYTE*)src; - BYTE* op = (BYTE*)dst; U32 nbSeq; size_t cSize; DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); @@ -3746,7 +4204,7 @@ if (bss == ZSTDbss_noCompress) { if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); return cSize; @@ -3764,9 +4222,9 @@ void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 frame) { - /* This the upper bound for the length of an rle block. - * This isn't the actual upper bound. Finding the real threshold - * needs further investigation. + /* This is an estimated upper bound for the length of an rle block. + * This isn't the actual upper bound. + * Finding the real threshold needs further investigation. */ const U32 rleMaxLength = 25; size_t cSize; @@ -3858,10 +4316,11 @@ * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. */ - { - size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); + { size_t const cSize = + ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); if (cSize != ERROR(dstSize_tooSmall)) { - size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + size_t const maxCSize = + srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); @@ -3869,7 +4328,7 @@ } } } - } + } /* if (bss == ZSTDbss_compress)*/ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. @@ -3927,7 +4386,7 @@ * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. * Frame is supposed already started (header already produced) -* @return : compressed size, or an error code +* @return : compressed size, or an error code */ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, @@ -3951,7 +4410,9 @@ ZSTD_matchState_t* const ms = &cctx->blockState.matchState; U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. 
We need to revisit and change this logic to be more consistent */ + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, dstSize_tooSmall, "not enough space to store compressed block"); if (remaining < blockSize) blockSize = remaining; @@ -3990,7 +4451,7 @@ MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } - } + } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ ip += blockSize; @@ -4169,31 +4630,51 @@ } } -size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } +/* NOTE: Must just wrap ZSTD_compressContinue_public() */ +size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize); +} -size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx) { ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; assert(!ZSTD_checkCParams(cParams)); - return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); + return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog); } -size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */ +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ + return ZSTD_getBlockSize_deprecated(cctx); +} + +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); - { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); + { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx); RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } +/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */ +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize); +} + /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ @@ -4202,25 +4683,36 @@ ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm) + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; - /* Assert that we the ms params match the params we're being given */ + /* Assert that the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); - if (srcSize > ZSTD_CHUNKSIZE_MAX) { + { /* Ensure large dictionaries can't cause index overflow */ + /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX. 
* Dictionaries right at the edge will immediately trigger overflow * correction, but I don't want to insert extra constraints here. */ - U32 const maxDictSize = ZSTD_CURRENT_MAX - 1; - /* We must have cleared our windows when our source is this large. */ - assert(ZSTD_window_isEmpty(ms->window)); - if (loadLdmDict) - assert(ZSTD_window_isEmpty(ls->window)); + U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX; + + int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(¶ms->cParams); + if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) { + /* Some dictionary matchfinders in zstd use "short cache", + * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each + * CDict hashtable entry as a tag rather than as part of an index. + * When short cache is used, we need to truncate the dictionary + * so that its indices don't overlap with the tag. */ + U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX; + maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize); + assert(!loadLdmDict); + } + /* If the dictionary is too large, only load the suffix of the dictionary. */ if (srcSize > maxDictSize) { ip = iend - maxDictSize; @@ -4229,30 +4721,46 @@ } } - DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); + if (srcSize > ZSTD_CHUNKSIZE_MAX) { + /* We must have cleared our windows when our source is this large. */ + assert(ZSTD_window_isEmpty(ms->window)); + if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window)); + } ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0); - ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); - ms->forceNonContiguous = params->deterministicRefPrefix; - if (loadLdmDict) { + DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); + + if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */ ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); + ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); + } + + /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */ + if (params->cParams.strategy < ZSTD_btultra) { + U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28); + if (srcSize > maxDictSize) { + ip = iend - maxDictSize; + src = ip; + srcSize = maxDictSize; + } } + ms->nextToUpdate = (U32)(ip - ms->window.base); + ms->loadedDictEnd = params->forceWindow ? 
0 : (U32)(iend - ms->window.base); + ms->forceNonContiguous = params->deterministicRefPrefix; + if (srcSize <= HASH_READ_SIZE) return 0; ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend); - if (loadLdmDict) - ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); - switch(params->cParams.strategy) { case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, dtlm); + ZSTD_fillHashTable(ms, iend, dtlm, tfp); break; case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, dtlm); + ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp); break; case ZSTD_greedy: @@ -4265,7 +4773,7 @@ } else { assert(params->useRowMatchFinder != ZSTD_ps_auto); if (params->useRowMatchFinder == ZSTD_ps_enable) { - size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16); + size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog); ZSTD_memset(ms->tagTable, 0, tagTableSize); ZSTD_row_update(ms, iend-HASH_READ_SIZE); DEBUGLOG(4, "Using row-based hash table for lazy dict"); @@ -4418,6 +4926,7 @@ ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, void* workspace) { const BYTE* dictPtr = (const BYTE*)dict; @@ -4436,7 +4945,7 @@ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( - ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), ""); + ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), ""); } return dictID; } @@ -4452,6 +4961,7 @@ const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp, void* workspace) { DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); @@ -4464,13 +4974,13 @@ /* dict restricted modes */ if (dictContentType == ZSTD_dct_rawContent) - return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm); + return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp); if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_auto) { DEBUGLOG(4, "raw content dictionary detected"); return ZSTD_loadDictionaryContent( - ms, ls, ws, params, dict, dictSize, dtlm); + ms, ls, ws, params, dict, dictSize, dtlm, tfp); } RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); assert(0); /* impossible */ @@ -4478,13 +4988,14 @@ /* dict as full zstd dictionary */ return ZSTD_loadZstdDictionary( - bs, ms, ws, params, dict, dictSize, dtlm, workspace); + bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace); } #define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) #define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL) /*! 
ZSTD_compressBegin_internal() : + * Assumption : either @dict OR @cdict (or none) is non-NULL, never both * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, @@ -4520,11 +5031,11 @@ cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, - cctx->entropyWorkspace) + ZSTD_tfp_forCCtx, cctx->entropyWorkspace) : ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, - dictContentType, dtlm, cctx->entropyWorkspace); + dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= UINT_MAX); cctx->dictID = (U32)dictID; @@ -4565,11 +5076,11 @@ &cctxParams, pledgedSrcSize); } -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +static size_t +ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_CCtx_params cctxParams; - { - ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); + { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict); ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel); } DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); @@ -4577,9 +5088,15 @@ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } +size_t +ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel); +} + size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { - return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); + return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel); } @@ -4649,9 +5166,9 @@ #endif } -size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, @@ -4675,6 +5192,14 @@ return cSize + endResult; } +/* NOTE: Must just wrap ZSTD_compressEnd_public() */ +size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); +} + size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, @@ -4703,7 +5228,7 @@ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) , ""); - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); } size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, @@ -4828,7 +5353,7 @@ { size_t const dictID = ZSTD_compress_insertDictionary( &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, ¶ms, cdict->dictContent, cdict->dictContentSize, - 
dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace); + dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= (size_t)(U32)-1); cdict->dictID = (U32)dictID; @@ -5025,6 +5550,7 @@ params.cParams = cParams; params.useRowMatchFinder = useRowMatchFinder; cdict->useRowMatchFinder = useRowMatchFinder; + cdict->compressionLevel = ZSTD_NO_CLEVEL; if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, @@ -5104,12 +5630,17 @@ /* ZSTD_compressBegin_usingCDict() : * cdict must be != NULL */ -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); } +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict); +} + /*! ZSTD_compress_usingCDict_internal(): * Implementation of various ZSTD_compress_usingCDict* functions. */ @@ -5119,7 +5650,7 @@ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize); } /*! ZSTD_compress_usingCDict_advanced(): @@ -5316,30 +5847,41 @@ static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { - size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; - if (hintInSize==0) hintInSize = cctx->blockSize; - return hintInSize; + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { + return cctx->blockSize - cctx->stableIn_notConsumed; + } + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); + { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize==0) hintInSize = cctx->blockSize; + return hintInSize; + } } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants - * non-static, because can be called from zstdmt_compress.c - * @return : hint size for next input */ + * @return : hint size for next input to complete ongoing block */ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { - const char* const istart = (const char*)input->src; - const char* const iend = input->size != 0 ? istart + input->size : istart; - const char* ip = input->pos != 0 ? istart + input->pos : istart; - char* const ostart = (char*)output->dst; - char* const oend = output->size != 0 ? ostart + output->size : ostart; - char* op = output->pos != 0 ? ostart + output->pos : ostart; + const char* const istart = (assert(input != NULL), (const char*)input->src); + const char* const iend = (istart != NULL) ? istart + input->size : istart; + const char* ip = (istart != NULL) ? istart + input->pos : istart; + char* const ostart = (assert(output != NULL), (char*)output->dst); + char* const oend = (ostart != NULL) ? ostart + output->size : ostart; + char* op = (ostart != NULL) ? 
ostart + output->pos : ostart; U32 someMoreWork = 1; /* check expectations */ - DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); + DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos); + assert(zcs != NULL); + if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) { + assert(input->pos >= zcs->stableIn_notConsumed); + input->pos -= zcs->stableIn_notConsumed; + ip -= zcs->stableIn_notConsumed; + zcs->stableIn_notConsumed = 0; + } if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); @@ -5348,8 +5890,10 @@ assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); } - assert(output->pos <= output->size); + if (input->src == NULL) assert(input->size == 0); assert(input->pos <= input->size); + if (output->dst == NULL) assert(output->size == 0); + assert(output->pos <= output->size); assert((U32)flushMode <= (U32)ZSTD_e_end); while (someMoreWork) { @@ -5364,7 +5908,7 @@ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ - size_t const cSize = ZSTD_compressEnd(zcs, + size_t const cSize = ZSTD_compressEnd_public(zcs, op, oend-op, ip, iend-ip); DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); @@ -5381,8 +5925,7 @@ zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; - if (loaded != 0) - ip += loaded; + if (ip) ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ @@ -5393,6 +5936,20 @@ /* empty */ someMoreWork = 0; break; } + } else { + assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + if ( (flushMode == ZSTD_e_continue) + && ( (size_t)(iend - ip) < zcs->blockSize) ) { + /* can't compress a full block : stop here */ + zcs->stableIn_notConsumed = (size_t)(iend - ip); + ip = iend; /* pretend to have consumed input */ + someMoreWork = 0; break; + } + if ( (flushMode == ZSTD_e_flush) + && (ip == iend) ) { + /* empty */ + someMoreWork = 0; break; + } } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); @@ -5400,9 +5957,8 @@ void* cDst; size_t cSize; size_t oSize = oend-op; - size_t const iSize = inputBuffered - ? zcs->inBuffPos - zcs->inToCompress - : MIN((size_t)(iend - ip), zcs->blockSize); + size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress + : MIN((size_t)(iend - ip), zcs->blockSize); if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) cDst = op; /* compress into output buffer, to skip flush stage */ else @@ -5410,9 +5966,9 @@ if (inputBuffered) { unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); cSize = lastBlock ? - ZSTD_compressEnd(zcs, cDst, oSize, + ZSTD_compressEnd_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : - ZSTD_compressContinue(zcs, cDst, oSize, + ZSTD_compressContinue_public(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); FORWARD_IF_ERROR(cSize, "%s", lastBlock ? 
"ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; @@ -5425,19 +5981,16 @@ if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; - } else { - unsigned const lastBlock = (ip + iSize == iend); - assert(flushMode == ZSTD_e_end /* Already validated */); + } else { /* !inputBuffered, hence ZSTD_bm_stable */ + unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend); cSize = lastBlock ? - ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) : - ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize); + ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) : + ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize); /* Consume the input prior to error checking to mirror buffered mode. */ - if (iSize > 0) - ip += iSize; + if (ip) ip += iSize; FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; - if (lastBlock) - assert(ip == iend); + if (lastBlock) assert(ip == iend); } if (cDst == op) { /* no need to flush */ op += cSize; @@ -5513,8 +6066,10 @@ /* After a compression call set the expected input/output buffer. * This is validated at the start of the next compression call. */ -static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input) +static void +ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input) { + DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)"); if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { cctx->expectedInBuffer = *input; } @@ -5533,22 +6088,22 @@ { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { ZSTD_inBuffer const expect = cctx->expectedInBuffer; - if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size) - RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!"); - if (endOp != ZSTD_e_end) - RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!"); + if (expect.src != input->src || expect.pos != input->pos) + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!"); } + (void)endOp; if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) { size_t const outBufferSize = output->size - output->pos; if (cctx->expectedOutBufferSize != outBufferSize) - RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!"); + RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!"); } return 0; } static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, ZSTD_EndDirective endOp, - size_t inSize) { + size_t inSize) +{ ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ @@ -5562,9 +6117,9 @@ params.compressionLevel = cctx->cdict->compressionLevel; } DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); - if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */ - { - size_t const dictSize = prefixDict.dict + if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ + + { size_t const dictSize = prefixDict.dict ? prefixDict.dictSize : (cctx->cdict ? 
cctx->cdict->dictContentSize : 0); ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); @@ -5576,8 +6131,18 @@ params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); + params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); + params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize); + params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel); #ifdef ZSTD_MULTITHREAD + /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */ + RETURN_ERROR_IF( + params.useSequenceProducer == 1 && params.nbWorkers >= 1, + parameter_combination_unsupported, + "External sequence producer isn't supported with nbWorkers >= 1" + ); + if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ } @@ -5605,7 +6170,7 @@ cctx->streamStage = zcss_load; cctx->appliedParams = params; } else -#endif +#endif /* ZSTD_MULTITHREAD */ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, @@ -5631,6 +6196,8 @@ return 0; } +/* @return provides a minimum amount of data remaining to be flushed from internal buffers + */ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, @@ -5645,8 +6212,27 @@ /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { - FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed"); - ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ + size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */ + size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed; + if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */ + && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */ + && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */ + if (cctx->stableIn_notConsumed) { /* not the first time */ + /* check stable source guarantees */ + RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer"); + RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos"); + } + /* pretend input was consumed, to give a sense forward progress */ + input->pos = input->size; + /* save stable inBuffer, for later control, and flush/end */ + cctx->expectedInBuffer = *input; + /* but actually input wasn't consumed, so keep track of position from where compression shall resume */ + cctx->stableIn_notConsumed += inputSize; + /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */ + return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */ + } + FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, 
totalInputSize), "compressStream2 initialization failed"); + ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */ } /* end of transparent initialization stage */ @@ -5659,6 +6245,13 @@ ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); cctx->cParamsChanged = 0; } + if (cctx->stableIn_notConsumed) { + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable); + /* some early data was skipped - make it available for consumption */ + assert(input->pos >= cctx->stableIn_notConsumed); + input->pos -= cctx->stableIn_notConsumed; + cctx->stableIn_notConsumed = 0; + } for (;;) { size_t const ipos = input->pos; size_t const opos = output->pos; @@ -5697,7 +6290,7 @@ ZSTD_setBufferExpectations(cctx, output, input); return flushMin; } -#endif +#endif /* ZSTD_MULTITHREAD */ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); DEBUGLOG(5, "completed ZSTD_compressStream2"); ZSTD_setBufferExpectations(cctx, output, input); @@ -5710,13 +6303,20 @@ const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } size_t ZSTD_compress2(ZSTD_CCtx* cctx, @@ -5739,6 +6339,7 @@ /* Reset to the original values. */ cctx->requestedParams.inBufferMode = originalInBufferMode; cctx->requestedParams.outBufferMode = originalOutBufferMode; + FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); if (result != 0) { /* compression not completed, due to lack of output space */ assert(oPos == dstCapacity); @@ -5749,64 +6350,61 @@ } } -typedef struct { - U32 idx; /* Index in array of ZSTD_Sequence */ - U32 posInSequence; /* Position within sequence at idx */ - size_t posInSrc; /* Number of bytes given by sequences provided so far */ -} ZSTD_sequencePosition; - /* ZSTD_validateSequence() : * @offCode : is presumed to follow format required by ZSTD_storeSeq() * @returns a ZSTD error code if sequence is not valid */ static size_t -ZSTD_validateSequence(U32 offCode, U32 matchLength, - size_t posInSrc, U32 windowLog, size_t dictSize) +ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch, + size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) { - U32 const windowSize = 1 << windowLog; - /* posInSrc represents the amount of data the the decoder would decode up to this point. + U32 const windowSize = 1u << windowLog; + /* posInSrc represents the amount of data the decoder would decode up to this point. * As long as the amount of data decoded is less than or equal to window size, offsets may be * larger than the total length of output decoded in order to reference the dict, even larger than * window size. After output surpasses windowSize, we're limited to windowSize offsets again. */ size_t const offsetBound = posInSrc > windowSize ? 
(size_t)windowSize : posInSrc + (size_t)dictSize; - RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!"); - RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small"); + size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; + RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); + /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ + RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); return 0; } /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ -static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) +static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) { - U32 offCode = STORE_OFFSET(rawOffset); + U32 offBase = OFFSET_TO_OFFBASE(rawOffset); if (!ll0 && rawOffset == rep[0]) { - offCode = STORE_REPCODE_1; + offBase = REPCODE1_TO_OFFBASE; } else if (rawOffset == rep[1]) { - offCode = STORE_REPCODE(2 - ll0); + offBase = REPCODE_TO_OFFBASE(2 - ll0); } else if (rawOffset == rep[2]) { - offCode = STORE_REPCODE(3 - ll0); + offBase = REPCODE_TO_OFFBASE(3 - ll0); } else if (ll0 && rawOffset == rep[0] - 1) { - offCode = STORE_REPCODE_3; + offBase = REPCODE3_TO_OFFBASE; } - return offCode; + return offBase; } -/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of - * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. - */ -static size_t +size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) + const void* src, size_t blockSize, + ZSTD_paramSwitch_e externalRepSearch) { U32 idx = seqPos->idx; + U32 const startIdx = idx; BYTE const* ip = (BYTE const*)(src); const BYTE* const iend = ip + blockSize; repcodes_t updatedRepcodes; U32 dictSize; + DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize); + if (cctx->cdict) { dictSize = (U32)cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { @@ -5815,25 +6413,55 @@ dictSize = 0; } ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); - for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) { + for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { U32 const litLength = inSeqs[idx].litLength; - U32 const ll0 = (litLength == 0); U32 const matchLength = inSeqs[idx].matchLength; - U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + U32 offBase; - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); + if (externalRepSearch == ZSTD_ps_disable) { + offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset); + } else { + U32 const ll0 = (litLength == 0); + offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); + } + + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, 
matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer), "Sequence validation failed"); } - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; } + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + assert(externalRepSearch != ZSTD_ps_auto); + assert(idx >= startIdx); + if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) { + U32* const rep = updatedRepcodes.rep; + U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */ + + if (lastSeqIdx >= startIdx + 2) { + rep[2] = inSeqs[lastSeqIdx - 2].offset; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else if (lastSeqIdx == startIdx + 1) { + rep[2] = rep[0]; + rep[1] = inSeqs[lastSeqIdx - 1].offset; + rep[0] = inSeqs[lastSeqIdx].offset; + } else { + assert(lastSeqIdx == startIdx); + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = inSeqs[lastSeqIdx].offset; + } + } + ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); if (inSeqs[idx].litLength) { @@ -5842,26 +6470,15 @@ ip += inSeqs[idx].litLength; seqPos->posInSrc += inSeqs[idx].litLength; } - RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!"); + RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); seqPos->idx = idx+1; return 0; } -/* Returns the number of bytes to move the current read position back by. Only non-zero - * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something - * went wrong. - * - * This function will attempt to scan through blockSize bytes represented by the sequences - * in inSeqs, storing any (partial) sequences. - * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. 
- */ -static size_t +size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) { U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; @@ -5873,6 +6490,9 @@ U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; + /* TODO(embg) support fast parsing mode in noBlockDelim mode */ + (void)externalRepSearch; + if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { @@ -5880,7 +6500,7 @@ } else { dictSize = 0; } - DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); + DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { @@ -5888,7 +6508,7 @@ U32 litLength = currSeq.litLength; U32 matchLength = currSeq.matchLength; U32 const rawOffset = currSeq.offset; - U32 offCode; + U32 offBase; /* Modify the sequence depending on where endPosInSequence lies */ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { @@ -5902,7 +6522,6 @@ /* Move to the next sequence */ endPosInSequence -= currSeq.litLength + currSeq.matchLength; startPosInSequence = 0; - idx++; } else { /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence does not reach the end of the match. So, we have to split the sequence */ @@ -5942,21 +6561,23 @@ } /* Check if this offset can be represented with a repcode */ { U32 const ll0 = (litLength == 0); - offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0); - ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); } if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), + FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, + cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer), "Sequence validation failed"); } - DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, + DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); + RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; + if (!finalMatchSplit) + idx++; /* Next Sequence */ } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); @@ -5979,7 +6600,7 @@ typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize); + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { ZSTD_sequenceCopier sequenceCopier = NULL; @@ -5993,6 +6614,57 @@ return sequenceCopier; } +/* Discover the size of next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. + * The block size retrieved will be later compared to ensure it remains within bounds */ +static size_t +blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) +{ + int end = 0; + size_t blockSize = 0; + size_t spos = seqPos.idx; + DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize); + assert(spos <= inSeqsSize); + while (spos < inSeqsSize) { + end = (inSeqs[spos].offset == 0); + blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength; + if (end) { + if (inSeqs[spos].matchLength != 0) + RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0"); + break; + } + spos++; + } + if (!end) + RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); + return blockSize; +} + +/* More a "target" block size */ +static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining) +{ + int const lastBlock = (remaining <= blockSize); + return lastBlock ? remaining : blockSize; +} + +static size_t determine_blockSize(ZSTD_sequenceFormat_e mode, + size_t blockSize, size_t remaining, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) +{ + DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); + if (mode == ZSTD_sf_noBlockDelimiters) + return blockSize_noDelimiter(blockSize, remaining); + { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); + FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); + if (explicitBlockSize > blockSize) + RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block"); + if (explicitBlockSize > remaining) + RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source"); + return explicitBlockSize; + } +} + /* Compress, block-by-block, all of the sequences given. 
* * Returns the cumulative size of all compressed blocks (including their headers), @@ -6005,9 +6677,6 @@ const void* src, size_t srcSize) { size_t cSize = 0; - U32 lastBlock; - size_t blockSize; - size_t compressedSeqsSize; size_t remaining = srcSize; ZSTD_sequencePosition seqPos = {0, 0, 0}; @@ -6027,22 +6696,29 @@ } while (remaining) { + size_t compressedSeqsSize; size_t cBlockSize; size_t additionalByteAdjustment; - lastBlock = remaining <= cctx->blockSize; - blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize; + size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, + cctx->blockSize, remaining, + inSeqs, inSeqsSize, seqPos); + U32 const lastBlock = (blockSize == remaining); + FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); + assert(blockSize <= remaining); ZSTD_resetSeqStore(&cctx->seqStore); - DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize); + DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize); - additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize); + additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes); FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); blockSize -= additionalByteAdjustment; /* If blocks are too small, emit as a nocompress block */ - if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) { cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); + DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); cSize += cBlockSize; ip += blockSize; op += cBlockSize; @@ -6051,6 +6727,7 @@ continue; } + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, @@ -6059,11 +6736,11 @@ cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, cctx->bmi2); FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); - DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize); + DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); if (!cctx->isFirstBlock && ZSTD_maybeRLE(&cctx->seqStore) && - ZSTD_isRLE((BYTE const*)src, srcSize)) { + ZSTD_isRLE(ip, blockSize)) { /* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
* This is only an issue for zstd <= v1.4.3 @@ -6074,12 +6751,12 @@ if (compressedSeqsSize == 0) { /* ZSTD_noCompressBlock writes the block header as well */ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed"); + DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize); } else if (compressedSeqsSize == 1) { cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock); - FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed"); - DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize); + FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed"); + DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize); } else { U32 cBlockHeader; /* Error checking and repcodes update */ @@ -6091,11 +6768,10 @@ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); MEM_writeLE24(op, cBlockHeader); cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; - DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize); + DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); } cSize += cBlockSize; - DEBUGLOG(4, "cSize running total: %zu", cSize); if (lastBlock) { break; @@ -6106,12 +6782,15 @@ dstCapacity -= cBlockSize; cctx->isFirstBlock = 0; } + DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); } + DEBUGLOG(4, "cSize final total: %zu", cSize); return cSize; } -size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity, +size_t ZSTD_compressSequences(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, const ZSTD_Sequence* inSeqs, size_t inSeqsSize, const void* src, size_t srcSize) { @@ -6121,7 +6800,7 @@ size_t frameHeaderSize = 0; /* Transparent initialization stage, same as compressStream2() */ - DEBUGLOG(3, "ZSTD_compressSequences()"); + DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity); assert(cctx != NULL); FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); /* Begin writing output, starting with frame header */ @@ -6149,26 +6828,34 @@ cSize += 4; } - DEBUGLOG(3, "Final compressed size: %zu", cSize); + DEBUGLOG(4, "Final compressed size: %zu", cSize); return cSize; } /*====== Finalize ======*/ +static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs) +{ + const ZSTD_inBuffer nullInput = { NULL, 0, 0 }; + const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + return stableInput ? zcs->expectedInBuffer : nullInput; +} + /*! 
ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); + input.size = input.pos; /* do not ingest more input during flush */ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; + ZSTD_inBuffer input = inBuffer_forEndFlush(zcs); size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); - FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); + FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed"); if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; @@ -6290,7 +6977,7 @@ cp.targetLength = (unsigned)(-clampedCompressionLevel); } /* refine parameters based on srcSize & dictSize */ - return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode); + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto); } } @@ -6325,3 +7012,21 @@ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } + +void ZSTD_registerSequenceProducer( + ZSTD_CCtx* zc, void* mState, + ZSTD_sequenceProducer_F* mFinder +) { + if (mFinder != NULL) { + ZSTD_externalMatchCtx emctx; + emctx.mState = mState; + emctx.mFinder = mFinder; + emctx.seqBuffer = NULL; + emctx.seqBufferCapacity = 0; + zc->externalMatchCtx = emctx; + zc->requestedParams.useSequenceProducer = 1; + } else { + ZSTD_memset(&zc->externalMatchCtx, 0, sizeof(zc->externalMatchCtx)); + zc->requestedParams.useSequenceProducer = 0; + } +} diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_internal.h b/sys/contrib/zstd/lib/compress/zstd_compress_internal.h --- a/sys/contrib/zstd/lib/compress/zstd_compress_internal.h +++ b/sys/contrib/zstd/lib/compress/zstd_compress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -23,6 +23,7 @@ #ifdef ZSTD_MULTITHREAD # include "zstdmt_compress.h" #endif +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ #if defined (__cplusplus) extern "C" { @@ -117,12 +118,13 @@ /** ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. 
* @return : 0 on success or error code */ -size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, - const ZSTD_CCtx_params* cctxParams, - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize); +size_t ZSTD_buildBlockEntropyStats( + const seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize); /********************************* * Compression internals structs * @@ -148,6 +150,12 @@ size_t capacity; /* The capacity starting from `seq` pointer */ } rawSeqStore_t; +typedef struct { + U32 idx; /* Index in array of ZSTD_Sequence */ + U32 posInSequence; /* Position within sequence at idx */ + size_t posInSrc; /* Number of bytes given by sequences provided so far */ +} ZSTD_sequencePosition; + UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; typedef struct { @@ -218,8 +226,10 @@ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/ - U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ + BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */ + U64 hashSalt; /* For row-based matchFinder: salts the hash for re-use of tag table */ + U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */ U32* hashTable; U32* hashTable3; @@ -234,6 +244,18 @@ const ZSTD_matchState_t* dictMatchState; ZSTD_compressionParameters cParams; const rawSeqStore_t* ldmSeqStore; + + /* Controls prefetching in some dictMatchState matchfinders. + * This behavior is controlled from the cctx ms. + * This parameter has no effect in the cdict ms. */ + int prefetchCDictTables; + + /* When == 0, lazy match finders insert every position. + * When != 0, lazy match finders only insert positions they search. + * This allows them to skip much faster over incompressible data, + * at a small cost to compression ratio. + */ + int lazySkipping; }; typedef struct { @@ -330,6 +352,24 @@ /* Internal use, for createCCtxParams() and freeCCtxParams() only */ ZSTD_customMem customMem; + + /* Controls prefetching in some dictMatchState matchfinders */ + ZSTD_paramSwitch_e prefetchCDictTables; + + /* Controls whether zstd will fall back to an internal matchfinder + * if the external matchfinder returns an error code. */ + int enableMatchFinderFallback; + + /* Indicates whether an external matchfinder has been referenced. + * Users can't set this externally. + * It is set internally in ZSTD_registerSequenceProducer(). 
*/ + int useSequenceProducer; + + /* Adjust the max block size*/ + size_t maxBlockSize; + + /* Controls repcode search in external sequence parsing */ + ZSTD_paramSwitch_e searchForExternalRepcodes; }; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */ #define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) @@ -361,6 +401,14 @@ ZSTD_entropyCTablesMetadata_t entropyMetadata; } ZSTD_blockSplitCtx; +/* Context for block-level external matchfinder API */ +typedef struct { + void* mState; + ZSTD_sequenceProducer_F* mFinder; + ZSTD_Sequence* seqBuffer; + size_t seqBufferCapacity; +} ZSTD_externalMatchCtx; + struct ZSTD_CCtx_s { ZSTD_compressionStage_e stage; int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */ @@ -410,6 +458,7 @@ /* Stable in/out buffer verification */ ZSTD_inBuffer expectedInBuffer; + size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */ size_t expectedOutBufferSize; /* Dictionary */ @@ -429,9 +478,13 @@ /* Workspace for block splitter */ ZSTD_blockSplitCtx blockSplitCtx; + + /* Workspace for external matchfinder */ + ZSTD_externalMatchCtx externalMatchCtx; }; typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e; +typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e; typedef enum { ZSTD_noDict = 0, @@ -453,7 +506,7 @@ * In this mode we take both the source size and the dictionary size * into account when selecting and adjusting the parameters. */ - ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. + ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams. * We don't know what these parameters are for. We default to the legacy * behavior of taking both the source size and the dict size into account * when selecting and adjusting parameters. @@ -512,9 +565,11 @@ /* ZSTD_noCompressBlock() : * Writes uncompressed block to dst buffer from given src. * Returns the size of the block */ -MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) +MEM_STATIC size_t +ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); + DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity); RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, dstSize_tooSmall, "dst buf too small for uncompressed block"); MEM_writeLE24(dst, cBlockHeader24); @@ -522,7 +577,8 @@ return ZSTD_blockHeaderSize + srcSize; } -MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) +MEM_STATIC size_t +ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) { BYTE* const op = (BYTE*)dst; U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); @@ -541,7 +597,7 @@ { U32 const minlog = (strat>=ZSTD_btultra) ? 
(U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); - assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); return (srcSize >> minlog) + 2; } @@ -577,29 +633,27 @@ while (ip < iend) *op++ = *ip++; } -#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) -#define STORE_REPCODE_1 STORE_REPCODE(1) -#define STORE_REPCODE_2 STORE_REPCODE(2) -#define STORE_REPCODE_3 STORE_REPCODE(3) -#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1) -#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE) -#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE) -#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE) -#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE) -#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */ -#define STORED_TO_OFFBASE(o) ((o)+1) -#define OFFBASE_TO_STORED(o) ((o)-1) + +#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1) +#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2) +#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3) +#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */ +#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM) +#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM) +#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) +#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) +#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t. - * @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET(). + * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). * @matchLength : must be >= MINMATCH - * Allowed to overread literals up to litLimit. + * Allowed to over-read literals up to litLimit. */ HINT_INLINE UNUSED_ATTR void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, - U32 offBase_minus1, + U32 offBase, size_t matchLength) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; @@ -608,8 +662,8 @@ static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); - DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", - pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1); + DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u", + pos, (U32)litLength, (U32)matchLength, (U32)offBase); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); @@ -619,9 +673,9 @@ assert(literals + litLength <= litLimit); if (litEnd <= litLimit_w) { /* Common case we can use wildcopy. - * First copy 16 bytes, because literals are likely short. - */ - assert(WILDCOPY_OVERLENGTH >= 16); + * First copy 16 bytes, because literals are likely short. 
+ */ + ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16); ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); @@ -640,7 +694,7 @@ seqStorePtr->sequences[0].litLength = (U16)litLength; /* match offset */ - seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1); + seqStorePtr->sequences[0].offBase = offBase; /* match Length */ assert(matchLength >= MINMATCH); @@ -658,17 +712,17 @@ /* ZSTD_updateRep() : * updates in-place @rep (array of repeat offsets) - * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq() + * @offBase : sum-type, using numeric representation of ZSTD_storeSeq() */ MEM_STATIC void -ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) { - if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */ + if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */ rep[2] = rep[1]; rep[1] = rep[0]; - rep[0] = STORED_OFFSET(offBase_minus1); + rep[0] = OFFBASE_TO_OFFSET(offBase); } else { /* repcode */ - U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0; + U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; if (repCode > 0) { /* note : if repCode==0, no change */ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; rep[2] = (repCode >= 2) ? rep[1] : rep[2]; @@ -685,11 +739,11 @@ } repcodes_t; MEM_STATIC repcodes_t -ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) { repcodes_t newReps; ZSTD_memcpy(&newReps, rep, sizeof(newReps)); - ZSTD_updateRep(newReps.rep, offBase_minus1, ll0); + ZSTD_updateRep(newReps.rep, offBase, ll0); return newReps; } @@ -697,103 +751,6 @@ /*-************************************* * Match length counter ***************************************/ -static unsigned ZSTD_NbCommonBytes (size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _tzcnt_u64(val) >> 3; -# else - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, - 0, 3, 1, 3, 1, 4, 2, 7, - 0, 2, 3, 6, 1, 5, 3, 5, - 1, 3, 4, 4, 2, 5, 6, 7, - 7, 0, 1, 2, 3, 3, 4, 6, - 2, 6, 5, 5, 3, 4, 5, 6, - 7, 1, 2, 4, 6, 4, 4, 5, - 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, - 3, 2, 2, 1, 3, 2, 0, 1, - 3, 3, 1, 2, 2, 2, 2, 0, - 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) -# if STATIC_BMI2 - return _lzcnt_u64(val) >> 3; -# else - if (val != 0) { - unsigned long r; - 
_BitScanReverse64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# endif -# elif defined(__GNUC__) && (__GNUC__ >= 4) - return (__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, (unsigned long)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) { const BYTE* const pStart = pIn; @@ -839,32 +796,43 @@ * Hashes ***************************************/ static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ +static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */ +MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); } static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } +static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); } +static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); } static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } -static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } +static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); } static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } +static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); } static const U64 prime7bytes = 58295818150454627ULL; -static size_t 
ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } -static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } +static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); } static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } +static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); } +static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); } + MEM_STATIC FORCE_INLINE_ATTR size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + switch(mls) { default: @@ -876,6 +844,24 @@ } } +MEM_STATIC FORCE_INLINE_ATTR +size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) { + /* Although some of these hashes do support hBits up to 64, some do not. + * To be on the safe side, always avoid hBits > 32. */ + assert(hBits <= 32); + + switch(mls) + { + default: + case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt); + case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt); + case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt); + case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt); + case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt); + } +} + + /** ZSTD_ipow() : * Return base^exponent. */ @@ -1223,10 +1209,15 @@ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); - if (blockEndIdx > loadedDictEnd + maxDist) { + if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) { /* On reaching window size, dictionaries are invalidated. * For simplification, if window size is reached anywhere within next block, * the dictionary is invalidated for the full block. + * + * We also have to invalidate the dictionary if ZSTD_window_update() has detected + * non-contiguous segments, which means that loadedDictEnd != window->dictLimit. + * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use + * dictMatchState, so setting it to NULL is not a problem. */ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); *loadedDictEndPtr = 0; @@ -1358,6 +1349,42 @@ #endif +/* Short Cache */ + +/* Normally, zstd matchfinders follow this flow: + * 1. Compute hash at ip + * 2. Load index from hashTable[hash] + * 3. Check if *ip == *(base + index) + * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss. + * + * Short cache is an optimization which allows us to avoid step 3 most of the time + * when the data doesn't actually match. With short cache, the flow becomes: + * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip. + * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works. + * 3. 
Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue. + * + * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to + * dictMatchState matchfinders. + */ +#define ZSTD_SHORT_CACHE_TAG_BITS 8 +#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1) + +/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable. + * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */ +MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) { + size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK); + assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0); + hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag; +} + +/* Helper function for short cache matchfinders. + * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */ +MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { + U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK; + U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK; + return tag1 == tag2; +} #if defined (__cplusplus) } @@ -1448,9 +1475,6 @@ /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ -/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). */ -ZSTDLIB_API -/* End FreeBSD */ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); /** ZSTD_CCtx_trace() : @@ -1458,4 +1482,51 @@ */ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. + * Note that the block delimiter must include the last literals of the block. + */ +size_t +ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, + ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); + +/* Returns the number of bytes to move the current read position back by. + * Only non-zero if we ended up splitting a sequence. + * Otherwise, it may return a ZSTD error if something went wrong. + * + * This function will attempt to scan through blockSize bytes + * represented by the sequences in @inSeqs, + * storing any (partial) sequences. + * + * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to + * avoid splitting a match, or to avoid splitting a match such that it would produce a match + * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. + */ +size_t +ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); + + +/* =============================================================== + * Deprecated definitions that are still used internally to avoid + * deprecation warnings. These functions are exactly equivalent to + * their public variants, but avoid the deprecation warnings. 
+ * =============================================================== */ + +size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); + +size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + + #endif /* ZSTD_COMPRESS_H */ diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_literals.h b/sys/contrib/zstd/lib/compress/zstd_compress_literals.h --- a/sys/contrib/zstd/lib/compress/zstd_compress_literals.h +++ b/sys/contrib/zstd/lib/compress/zstd_compress_literals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,16 +16,24 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); +/* ZSTD_compressRleLiteralsBlock() : + * Conditions : + * - All bytes in @src are identical + * - dstCapacity >= 4 */ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); -/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, +/* ZSTD_compressLiterals(): + * @entropyWorkspace: must be aligned on 4-bytes boundaries + * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE + * @suspectUncompressible: sampling checks, to potentially skip huffman coding + */ +size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible); + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + int suspectUncompressible, + int bmi2); #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_literals.c b/sys/contrib/zstd/lib/compress/zstd_compress_literals.c --- a/sys/contrib/zstd/lib/compress/zstd_compress_literals.c +++ b/sys/contrib/zstd/lib/compress/zstd_compress_literals.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -13,11 +13,36 @@ ***************************************/ #include "zstd_compress_literals.h" + +/* ************************************************************** +* Debug Traces +****************************************************************/ +#if DEBUGLEVEL >= 2 + +static size_t showHexa(const void* src, size_t srcSize) +{ + const BYTE* const ip = (const BYTE*)src; + size_t u; + for (u=0; u31) + (srcSize>4095); + DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity); + RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, ""); switch(flSize) @@ -36,16 +61,30 @@ } ZSTD_memcpy(ostart + flSize, src, srcSize); - DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); + DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); return srcSize + flSize; } +static int allBytesIdentical(const void* src, size_t srcSize) +{ + assert(srcSize >= 1); + assert(src != NULL); + { const BYTE b = ((const BYTE*)src)[0]; + size_t p; + for (p=1; p31) + (srcSize>4095); - (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + assert(dstCapacity >= 4); (void)dstCapacity; + assert(allBytesIdentical(src, srcSize)); switch(flSize) { @@ -63,28 +102,51 @@ } ostart[flSize] = *(const BYTE*)src; - DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1); + DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1); return flSize+1; } -size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, - ZSTD_hufCTables_t* nextHuf, - ZSTD_strategy strategy, int disableLiteralCompression, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2, - unsigned suspectUncompressible) +/* ZSTD_minLiteralsToCompress() : + * returns minimal amount of literals + * for literal compression to even be attempted. + * Minimum is made tighter as compression strategy increases. + */ +static size_t +ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat) +{ + assert((int)strategy >= 0); + assert((int)strategy <= 9); + /* btultra2 : min 8 bytes; + * then 2x larger for each successive compression strategy + * max threshold 64 bytes */ + { int const shift = MIN(9-(int)strategy, 3); + size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 
6 : (size_t)8 << shift; + DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc); + return mintc; + } +} + +size_t ZSTD_compressLiterals ( + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, + int disableLiteralCompression, + int suspectUncompressible, + int bmi2) { - size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; symbolEncodingType_e hType = set_compressed; size_t cLitSize; - DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)", - disableLiteralCompression, (U32)srcSize); + DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)", + disableLiteralCompression, (U32)srcSize, dstCapacity); + + DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize)); /* Prepare nextEntropy assuming reusing the existing table */ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); @@ -92,40 +154,51 @@ if (disableLiteralCompression) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - /* small ? don't even attempt compression (speed opt) */ -# define COMPRESS_LITERALS_SIZE_MIN 63 - { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; - if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + /* if too small, don't even attempt compression (speed opt) */ + if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode)) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); { HUF_repeat repeat = prevHuf->repeatMode; - int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; + int const flags = 0 + | (bmi2 ? HUF_flags_bmi2 : 0) + | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0) + | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0) + | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0); + + typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int); + huf_compress_f huf_compress; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; - cLitSize = singleStream ? - HUF_compress1X_repeat( - ostart+lhSize, dstCapacity-lhSize, src, srcSize, - HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) : - HUF_compress4X_repeat( - ostart+lhSize, dstCapacity-lhSize, src, srcSize, - HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible); + huf_compress = singleStream ? 
HUF_compress1X_repeat : HUF_compress4X_repeat; + cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize, + src, srcSize, + HUF_SYMBOLVALUE_MAX, LitHufLog, + entropyWorkspace, entropyWorkspaceSize, + (HUF_CElt*)nextHuf->CTable, + &repeat, flags); + DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize); if (repeat != HUF_repeat_none) { /* reused the existing table */ - DEBUGLOG(5, "Reusing previous huffman table"); + DEBUGLOG(5, "reusing statistics from previous huffman block"); hType = set_repeat; } } - if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); - } + { size_t const minGain = ZSTD_minGain(srcSize, strategy); + if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } } if (cLitSize==1) { - ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); - } + /* A return value of 1 signals that the alphabet consists of a single symbol. + * However, in some rare circumstances, it could be the compressed size (a single byte). + * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`. + * (it's also necessary to not generate statistics). + * Therefore, in such a case, actively check that all bytes are identical. */ + if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) { + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } } if (hType == set_compressed) { /* using a newly constructed table */ @@ -136,16 +209,19 @@ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); + { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ + assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS); { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_sequences.h b/sys/contrib/zstd/lib/compress/zstd_compress_sequences.h --- a/sys/contrib/zstd/lib/compress/zstd_compress_sequences.h +++ b/sys/contrib/zstd/lib/compress/zstd_compress_sequences.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_sequences.c b/sys/contrib/zstd/lib/compress/zstd_compress_sequences.c --- a/sys/contrib/zstd/lib/compress/zstd_compress_sequences.c +++ b/sys/contrib/zstd/lib/compress/zstd_compress_sequences.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -58,7 +58,7 @@ { /* Heuristic: This should cover most blocks <= 16K and * start to fade out after 16K to about 32K depending on - * comprssibility. + * compressibility. */ return nbSeq >= 2048; } @@ -166,7 +166,7 @@ if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { - /* Prefer set_basic over set_rle when there are 2 or less symbols, + /* Prefer set_basic over set_rle when there are 2 or fewer symbols, * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. */ diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_superblock.h b/sys/contrib/zstd/lib/compress/zstd_compress_superblock.h --- a/sys/contrib/zstd/lib/compress/zstd_compress_superblock.h +++ b/sys/contrib/zstd/lib/compress/zstd_compress_superblock.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstd_compress_superblock.c b/sys/contrib/zstd/lib/compress/zstd_compress_superblock.c --- a/sys/contrib/zstd/lib/compress/zstd_compress_superblock.c +++ b/sys/contrib/zstd/lib/compress/zstd_compress_superblock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -36,13 +36,14 @@ * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block * and the following sub-blocks' literals sections will be Treeless_Literals_Block. * @return : compressed size of literals section of a sub-block - * Or 0 if it unable to compress. + * Or 0 if unable to compress. * Or error code */ -static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, - const ZSTD_hufCTablesMetadata_t* hufMetadata, - const BYTE* literals, size_t litSize, - void* dst, size_t dstSize, - const int bmi2, int writeEntropy, int* entropyWritten) +static size_t +ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + const BYTE* literals, size_t litSize, + void* dst, size_t dstSize, + const int bmi2, int writeEntropy, int* entropyWritten) { size_t const header = writeEntropy ? 200 : 0; size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); @@ -53,8 +54,6 @@ symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; size_t cLitSize = 0; - (void)bmi2; /* TODO bmi2... */ - DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); *entropyWritten = 0; @@ -76,9 +75,9 @@ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); } - /* TODO bmi2 */ - { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) - : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); + { int const flags = bmi2 ? HUF_flags_bmi2 : 0; + const size_t cSize = singleStream ? 
HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags) + : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags); op += cSize; cLitSize += cSize; if (cSize == 0 || ERR_isError(cSize)) { @@ -126,7 +125,11 @@ return op-ostart; } -static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { +static size_t +ZSTD_seqDecompressedSize(seqStore_t const* seqStore, + const seqDef* sequences, size_t nbSeq, + size_t litSize, int lastSequence) +{ const seqDef* const sstart = sequences; const seqDef* const send = sequences + nbSeq; const seqDef* sp = sstart; @@ -156,13 +159,14 @@ * @return : compressed size of sequences section of a sub-block * Or 0 if it is unable to compress * Or error code. */ -static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, - const ZSTD_fseCTablesMetadata_t* fseMetadata, - const seqDef* sequences, size_t nbSeq, - const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, - const ZSTD_CCtx_params* cctxParams, - void* dst, size_t dstCapacity, - const int bmi2, int writeEntropy, int* entropyWritten) +static size_t +ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + const seqDef* sequences, size_t nbSeq, + const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const int bmi2, int writeEntropy, int* entropyWritten) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; BYTE* const ostart = (BYTE*)dst; @@ -539,7 +543,7 @@ repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { - ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); + ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } diff --git a/sys/contrib/zstd/lib/compress/zstd_cwksp.h b/sys/contrib/zstd/lib/compress/zstd_cwksp.h --- a/sys/contrib/zstd/lib/compress/zstd_cwksp.h +++ b/sys/contrib/zstd/lib/compress/zstd_cwksp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,7 +14,9 @@ /*-************************************* * Dependencies ***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "../common/zstd_internal.h" +#include "../common/portability_macros.h" #if defined (__cplusplus) extern "C" { @@ -44,8 +46,9 @@ ***************************************/ typedef enum { ZSTD_cwksp_alloc_objects, - ZSTD_cwksp_alloc_buffers, - ZSTD_cwksp_alloc_aligned + ZSTD_cwksp_alloc_aligned_init_once, + ZSTD_cwksp_alloc_aligned, + ZSTD_cwksp_alloc_buffers } ZSTD_cwksp_alloc_phase_e; /** @@ -98,8 +101,8 @@ * * Workspace Layout: * - * [ ... workspace ... ] - * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers] + * [ ... workspace ... ] + * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] * * The various objects that live in the workspace are divided into the * following categories, and are allocated separately: @@ -123,9 +126,18 @@ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). * Their sizes depend on the cparams. 
These tables are 64-byte aligned. * - * - Aligned: these buffers are used for various purposes that require 4 byte - * alignment, but don't require any initialization before they're used. These - * buffers are each aligned to 64 bytes. + * - Init once: these buffers require to be initialized at least once before + * use. They should be used when we want to skip memory initialization + * while not triggering memory checkers (like Valgrind) when reading from + * from this memory without writing to it first. + * These buffers should be used carefully as they might contain data + * from previous compressions. + * Buffers are aligned to 64 bytes. + * + * - Aligned: these buffers don't require any initialization before they're + * used. The user of the buffer should make sure they write into a buffer + * location before reading from it. + * Buffers are aligned to 64 bytes. * * - Buffers: these buffers are used for various purposes that don't require * any alignment or initialization before they're used. This means they can @@ -137,8 +149,9 @@ * correctly packed into the workspace buffer. That order is: * * 1. Objects - * 2. Buffers - * 3. Aligned/Tables + * 2. Init once / Tables + * 3. Aligned / Tables + * 4. Buffers / Tables * * Attempts to reserve objects of different types out of order will fail. */ @@ -150,6 +163,7 @@ void* tableEnd; void* tableValidEnd; void* allocStart; + void* initOnceStart; BYTE allocFailed; int workspaceOversizedDuration; @@ -162,6 +176,7 @@ ***************************************/ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws); MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { (void)ws; @@ -171,6 +186,20 @@ assert(ws->tableEnd <= ws->allocStart); assert(ws->tableValidEnd <= ws->allocStart); assert(ws->allocStart <= ws->workspaceEnd); + assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); + assert(ws->workspace <= ws->initOnceStart); +#if ZSTD_MEMORY_SANITIZER + { + intptr_t const offset = __msan_test_shadow(ws->initOnceStart, + (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart); +#if defined(ZSTD_MSAN_PRINT) + if(offset!=-1) { + __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32); + } +#endif + assert(offset==-1); + }; +#endif } /** @@ -217,14 +246,10 @@ * for internal purposes (currently only alignment). */ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { - /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes - * to align the beginning of tables section, as well as another n_2=[0, 63] bytes - * to align the beginning of the aligned section. - * - * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and - * aligneds being sized in multiples of 64 bytes. 
+ /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES + * bytes to align the beginning of tables section and end of buffers; */ - size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES; + size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2; return slackSpace; } @@ -237,10 +262,18 @@ size_t const alignBytesMask = alignBytes - 1; size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; assert((alignBytes & alignBytesMask) == 0); - assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES); + assert(bytes < alignBytes); return bytes; } +/** + * Returns the initial value for allocStart which is used to determine the position from + * which we can allocate from the end of the workspace. + */ +MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) { + return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1)); +} + /** * Internal function. Do not use directly. * Reserves the given number of bytes within the aligned/buffer segment of the wksp, @@ -281,27 +314,16 @@ { assert(phase >= ws->phase); if (phase > ws->phase) { - /* Going from allocating objects to allocating buffers */ - if (ws->phase < ZSTD_cwksp_alloc_buffers && - phase >= ZSTD_cwksp_alloc_buffers) { + /* Going from allocating objects to allocating initOnce / tables */ + if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once && + phase >= ZSTD_cwksp_alloc_aligned_init_once) { ws->tableValidEnd = ws->objectEnd; - } + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); - /* Going from allocating buffers to allocating aligneds/tables */ - if (ws->phase < ZSTD_cwksp_alloc_aligned && - phase >= ZSTD_cwksp_alloc_aligned) { - { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */ - size_t const bytesToAlign = - ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES); - DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign); - ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */ - RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign), - memory_allocation, "aligned phase - alignment initial allocation failed!"); - } { /* Align the start of the tables to 64 bytes. 
Use [0, 63] bytes */ - void* const alloc = ws->objectEnd; + void *const alloc = ws->objectEnd; size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); - void* const objectEnd = (BYTE*)alloc + bytesToAlign; + void *const objectEnd = (BYTE *) alloc + bytesToAlign; DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, "table phase - alignment initial allocation failed!"); @@ -309,7 +331,9 @@ ws->tableEnd = objectEnd; /* table area starts being empty */ if (ws->tableValidEnd < ws->tableEnd) { ws->tableValidEnd = ws->tableEnd; - } } } + } + } + } ws->phase = phase; ZSTD_cwksp_assert_internal_consistency(ws); } @@ -321,7 +345,7 @@ */ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { - return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); + return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); } /** @@ -348,7 +372,9 @@ if (alloc) { alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { - __asan_unpoison_memory_region(alloc, bytes); + /* We need to keep the redzone poisoned while unpoisoning the bytes that + * are actually allocated. */ + __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); } } #endif @@ -364,6 +390,36 @@ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); } +/** + * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + * This memory has been initialized at least once in the past. + * This doesn't mean it has been initialized this time, and it might contain data from previous + * operations. + * The main usage is for algorithms that might need read access into uninitialized memory. + * The algorithm must maintain safety under these conditions and must make sure it doesn't + * leak any of the past data (directly or in side channels). + */ +MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) +{ + size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); + void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); + assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); + if(ptr && ptr < ws->initOnceStart) { + /* We assume the memory following the current allocation is either: + * 1. Not usable as initOnce memory (end of workspace) + * 2. Another initOnce buffer that has been allocated before (and so was previously memset) + * 3. An ASAN redzone, in which case we don't want to write on it + * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart. + * Note that we assume here that MSAN and ASAN cannot run in the same time. */ + ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes)); + ws->initOnceStart = ptr; + } +#if ZSTD_MEMORY_SANITIZER + assert(__msan_test_shadow(ptr, bytes) == -1); +#endif + return ptr; +} + /** * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). 
*/ @@ -382,13 +438,17 @@ */ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { - const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; + const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once; void* alloc; void* end; void* top; - if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { - return NULL; + /* We can only start allocating tables after we are done reserving space for objects at the + * start of the workspace */ + if(ws->phase < phase) { + if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { + return NULL; + } } alloc = ws->tableEnd; end = (BYTE *)alloc + bytes; @@ -467,11 +527,19 @@ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the table re-use logic is sound, and that we don't * access table space that we haven't cleaned, we re-"poison" the table - * space every time we mark it dirty. */ + * space every time we mark it dirty. + * Since tableValidEnd space and initOnce space may overlap we don't poison + * the initOnce portion as it break its promise. This means that this poisoning + * check isn't always applied fully. */ { size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; assert(__msan_test_shadow(ws->objectEnd, size) == -1); - __msan_poison(ws->objectEnd, size); + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + __msan_poison(ws->objectEnd, size); + } else { + assert(ws->initOnceStart >= ws->objectEnd); + __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd); + } } #endif @@ -499,7 +567,7 @@ assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { - ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd); + ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd)); } ZSTD_cwksp_mark_tables_clean(ws); } @@ -536,11 +604,14 @@ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) /* To validate that the context re-use logic is sound, and that we don't * access stuff that this compression hasn't initialized, we re-"poison" - * the workspace (or at least the non-static, non-table parts of it) - * every time we start a new compression. */ + * the workspace except for the areas in which we expect memory re-use + * without initialization (objects, valid tables area and init once + * memory). 
*/ { - size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; - __msan_poison(ws->tableValidEnd, size); + if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { + size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd; + __msan_poison(ws->tableValidEnd, size); + } } #endif @@ -556,10 +627,10 @@ #endif ws->tableEnd = ws->objectEnd; - ws->allocStart = ws->workspaceEnd; + ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); ws->allocFailed = 0; - if (ws->phase > ZSTD_cwksp_alloc_buffers) { - ws->phase = ZSTD_cwksp_alloc_buffers; + if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) { + ws->phase = ZSTD_cwksp_alloc_aligned_init_once; } ZSTD_cwksp_assert_internal_consistency(ws); } @@ -576,6 +647,7 @@ ws->workspaceEnd = (BYTE*)start + size; ws->objectEnd = ws->workspace; ws->tableValidEnd = ws->objectEnd; + ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); ws->phase = ZSTD_cwksp_alloc_objects; ws->isStatic = isStatic; ZSTD_cwksp_clear(ws); @@ -628,17 +700,11 @@ * Returns if the estimated space needed for a wksp is within an acceptable limit of the * actual amount of space used. */ -MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws, - size_t const estimatedSpace, int resizedWorkspace) { - if (resizedWorkspace) { - /* Resized/newly allocated wksp should have exact bounds */ - return ZSTD_cwksp_used(ws) == estimatedSpace; - } else { - /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes - * than estimatedSpace. See the comments in zstd_cwksp.h for details. - */ - return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63); - } +MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) { + /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice + * the alignment bytes difference between estimation and actual usage */ + return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) && + ZSTD_cwksp_used(ws) <= estimatedSpace; } diff --git a/sys/contrib/zstd/lib/compress/zstd_double_fast.h b/sys/contrib/zstd/lib/compress/zstd_double_fast.h --- a/sys/contrib/zstd/lib/compress/zstd_double_fast.h +++ b/sys/contrib/zstd/lib/compress/zstd_double_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,8 @@ #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); diff --git a/sys/contrib/zstd/lib/compress/zstd_double_fast.c b/sys/contrib/zstd/lib/compress/zstd_double_fast.c --- a/sys/contrib/zstd/lib/compress/zstd_double_fast.c +++ b/sys/contrib/zstd/lib/compress/zstd_double_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
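[Editor's illustration, not part of the imported sources] The cwksp hunks above change the workspace bookkeeping so that the estimate may exceed actual usage by up to two 64-byte alignment gaps (one at the front of the tables area, one at the back of the aligned/init-once area). A minimal standalone sketch of that acceptance check, with illustrative names standing in for ZSTD_CWKSP_ALIGNMENT_BYTES and ZSTD_cwksp_used():

/* Sketch: why a 2*64-byte slack is tolerated when comparing estimated vs.
 * actual workspace usage. Tables are aligned up at the front and the
 * aligned/init-once region is aligned down at the back, so the estimate can
 * overshoot real consumption by at most 128 bytes. */
#include <stddef.h>
#include <assert.h>

#define ALIGNMENT_BYTES 64  /* stands in for ZSTD_CWKSP_ALIGNMENT_BYTES */

static int usage_within_bounds(size_t estimatedSpace, size_t usedSpace)
{
    size_t const slack = 2 * ALIGNMENT_BYTES;   /* front + back alignment gaps */
    return (estimatedSpace - slack) <= usedSpace && usedSpace <= estimatedSpace;
}

int main(void)
{
    assert(usage_within_bounds(4096, 4096));        /* exact match */
    assert(usage_within_bounds(4096, 4096 - 100));  /* within the 128-byte slack */
    assert(!usage_within_bounds(4096, 4096 - 200)); /* too far below the estimate */
    return 0;
}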
* * This source code is licensed under both the BSD-style license (found in the @@ -11,8 +11,43 @@ #include "zstd_compress_internal.h" #include "zstd_double_fast.h" +static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashLarge = ms->hashTable; + U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + /* Always insert every fastHashFillStep position into the hash tables. + * Insert the other positions into the large hash table if their entry + * is empty. + */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls); + size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) { + ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i); + } + if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { + ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i); + } + /* Only load extra positions for ZSTD_dtlm_full */ + if (dtlm == ZSTD_dtlm_fast) + break; + } } +} + +static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -43,7 +78,19 @@ /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; - } } + } } +} + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm); + } } @@ -67,7 +114,7 @@ const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; + U32 offsetSaved1 = 0, offsetSaved2 = 0; size_t mLength; U32 offset; @@ -100,8 +147,8 @@ U32 const current = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); U32 const maxRep = current - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; } /* Outer Loop: one iteration per match found and stored */ @@ -131,7 +178,7 @@ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); goto _match_stored; } @@ -175,9 +222,13 @@ } while (ip1 <= ilimit); _cleanup: + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. 
See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -217,7 +268,7 @@ hashLong[hl1] = (U32)(ip1 - base); } - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); _match_stored: /* match found */ @@ -243,7 +294,7 @@ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength); ip += rLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -275,7 +326,6 @@ const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams; @@ -286,8 +336,8 @@ const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); - const U32 dictHBitsL = dictCParams->hashLog; - const U32 dictHBitsS = dictCParams->chainLog; + const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); @@ -295,6 +345,13 @@ /* if a dictionary is attached, it must be within window range */ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32); + PREFETCH_AREA(dictHashLong, hashTableBytes) + PREFETCH_AREA(dictHashSmall, chainTableBytes) + } + /* init */ ip += (dictAndPrefixLength == 0); @@ -309,8 +366,12 @@ U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); - size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); + size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8); + size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls); + U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS]; + U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL); + int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS); U32 const curr = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; @@ -328,7 +389,7 @@ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); goto _match_stored; } @@ -340,9 +401,9 @@ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } - } else { + } else if (dictTagsMatchL) { /* check dictMatchState long match */ - U32 const dictMatchIndexL = dictHashLong[dictHL]; + U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); @@ -358,9 +419,9 @@ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } - } else { + } else if (dictTagsMatchS) { /* check dictMatchState short match */ - U32 const dictMatchIndexS = dictHashSmall[dictHS]; + U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS; match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; @@ -375,10 +436,11 @@ continue; _search_next_long: - { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); + size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8); U32 const matchIndexL3 = hashLong[hl3]; + U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3); const BYTE* matchL3 = base + matchIndexL3; hashLong[hl3] = curr + 1; @@ -391,9 +453,9 @@ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } - } else { + } else if (dictTagsMatchL3) { /* check dict long +1 match */ - U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; + U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { @@ -419,7 +481,7 @@ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); _match_stored: /* match found */ @@ -448,7 +510,7 @@ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; @@ -461,8 +523,8 @@ } /* while (ip < ilimit) */ /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -585,7 +647,7 @@ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; @@ -596,7 +658,7 @@ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); @@ -621,7 +683,7 @@ } offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; @@ -653,7 +715,7 @@ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; diff --git a/sys/contrib/zstd/lib/compress/zstd_fast.h b/sys/contrib/zstd/lib/compress/zstd_fast.h --- a/sys/contrib/zstd/lib/compress/zstd_fast.h +++ b/sys/contrib/zstd/lib/compress/zstd_fast.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,8 @@ #include "zstd_compress_internal.h" void ZSTD_fillHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); diff --git a/sys/contrib/zstd/lib/compress/zstd_fast.c b/sys/contrib/zstd/lib/compress/zstd_fast.c --- a/sys/contrib/zstd/lib/compress/zstd_fast.c +++ b/sys/contrib/zstd/lib/compress/zstd_fast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -11,8 +11,42 @@ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_fast.h" +static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; + U32 const mls = cParams->minMatch; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; -void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + /* Currently, we always use ZSTD_dtlm_full for filling CDict tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_full); + + /* Always insert every fastHashFillStep position into the hash table. + * Insert the other positions if their hash entry is empty. + */ + for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { + U32 const curr = (U32)(ip - base); + { size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls); + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); } + + if (dtlm == ZSTD_dtlm_fast) continue; + /* Only load extra positions for ZSTD_dtlm_full */ + { U32 p; + for (p = 1; p < fastHashFillStep; ++p) { + size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); + } } } } +} + +static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -25,6 +59,10 @@ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; const U32 fastHashFillStep = 3; + /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_fast); + /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. */ @@ -42,6 +80,18 @@ } } } } } +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) +{ + if (tfp == ZSTD_tfp_forCDict) { + ZSTD_fillHashTableForCDict(ms, end, dtlm); + } else { + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } +} + /** * If you squint hard enough (and ignore repcodes), the search operation at any @@ -117,7 +167,7 @@ U32 rep_offset1 = rep[0]; U32 rep_offset2 = rep[1]; - U32 offsetSaved = 0; + U32 offsetSaved1 = 0, offsetSaved2 = 0; size_t hash0; /* hash for ip0 */ size_t hash1; /* hash for ip1 */ @@ -141,8 +191,8 @@ { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; - if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0; - if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0; + if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0; + if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0; } /* start each op */ @@ -180,8 +230,14 @@ mLength = ip0[-1] == match0[-1]; ip0 -= mLength; match0 -= mLength; - offcode = STORE_REPCODE_1; + offcode = REPCODE1_TO_OFFBASE; mLength += 4; + + /* First write next hash table entry; we've already calculated it. 
+ * This write is known to be safe because the ip1 is before the + * repcode (ip2). */ + hashTable[hash1] = (U32)(ip1 - base); + goto _match; } @@ -195,6 +251,12 @@ /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ + + /* First write next hash table entry; we've already calculated it. + * This write is known to be safe because the ip1 == ip0 + 1, so + * we know we will resume searching after ip1 */ + hashTable[hash1] = (U32)(ip1 - base); + goto _offset; } @@ -224,6 +286,21 @@ /* check match at ip[0] */ if (MEM_read32(ip0) == mval) { /* found a match! */ + + /* first write next hash table entry; we've already calculated it */ + if (step <= 4) { + /* We need to avoid writing an index into the hash table >= the + * position at which we will pick up our searching after we've + * taken this match. + * + * The minimum possible match has length 4, so the earliest ip0 + * can be after we take this match will be the current ip0 + 4. + * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely + * write this position. + */ + hashTable[hash1] = (U32)(ip1 - base); + } + goto _offset; } @@ -254,9 +331,24 @@ * However, it seems to be a meaningful performance hit to try to search * them. So let's not. */ + /* When the repcodes are outside of the prefix, we set them to zero before the loop. + * When the offsets are still zero, we need to restore them after the block to have a correct + * repcode history. If only one offset was invalid, it is easy. The tricky case is when both + * offsets were invalid. We need to figure out which offset to refill with. + * - If both offsets are zero they are in the same order. + * - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`. + * - If only one is zero, we need to decide which offset to restore. + * - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1. + * - It is impossible for rep_offset2 to be non-zero. + * + * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then + * set rep[0] = rep_offset1 and rep[1] = offsetSaved1. + */ + offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2; + /* save reps for next block */ - rep[0] = rep_offset1 ? rep_offset1 : offsetSaved; - rep[1] = rep_offset2 ? rep_offset2 : offsetSaved; + rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1; + rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -267,7 +359,7 @@ match0 = base + idx; rep_offset2 = rep_offset1; rep_offset1 = (U32)(ip0-match0); - offcode = STORE_OFFSET(rep_offset1); + offcode = OFFSET_TO_OFFBASE(rep_offset1); mLength = 4; /* Count the backwards match length. */ @@ -287,11 +379,6 @@ ip0 += mLength; anchor = ip0; - /* write next hash table entry */ - if (ip1 < ip0) { - hashTable[hash1] = (U32)(ip1 - base); - } - /* Fill table and check for immediate repcode. */ if (ip0 <= ilimit) { /* Fill Table */ @@ -306,7 +393,7 @@ { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; - ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength); + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength); anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) 
*/ } } } @@ -380,14 +467,14 @@ U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; + const BYTE* ip0 = istart; + const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */ const BYTE* anchor = istart; const U32 prefixStartIndex = ms->window.dictLimit; const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; @@ -397,13 +484,13 @@ const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); - const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); - const U32 dictHLog = dictCParams->hashLog; + const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart); + const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS; /* if a dictionary is still attached, it necessarily means that * it is within window size. So we just check it. */ const U32 maxDistance = 1U << cParams->windowLog; - const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ @@ -413,106 +500,155 @@ * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); + if (ms->prefetchCDictTables) { + size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32); + PREFETCH_AREA(dictHashTable, hashTableBytes) + } + /* init */ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); - ip += (dictAndPrefixLength == 0); + ip0 += (dictAndPrefixLength == 0); /* dictMatchState repCode checks don't currently handle repCode == 0 * disabling. */ assert(offset_1 <= dictAndPrefixLength); assert(offset_2 <= dictAndPrefixLength); - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + /* Outer search loop */ + assert(stepSize >= 1); + while (ip1 <= ilimit) { /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */ size_t mLength; - size_t const h = ZSTD_hashPtr(ip, hlog, mls); - U32 const curr = (U32)(ip-base); - U32 const matchIndex = hashTable[h]; - const BYTE* match = base + matchIndex; - const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (repIndex < prefixStartIndex) ? - dictBase + (repIndex - dictIndexDelta) : - base + repIndex; - hashTable[h] = curr; /* update hash table */ - - if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); - } else if ( (matchIndex <= prefixStartIndex) ) { - size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); - U32 const dictMatchIndex = dictHashTable[dictHash]; - const BYTE* dictMatch = dictBase + dictMatchIndex; - if (dictMatchIndex <= dictStartIndex || - MEM_read32(dictMatch) != MEM_read32(ip)) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a dict match */ - U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta); - mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; - while (((ip>anchor) & (dictMatch>dictStart)) - && (ip[-1] == dictMatch[-1])) { - ip--; dictMatch--; mLength++; + size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls); + + size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls); + U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS]; + int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0); + + U32 matchIndex = hashTable[hash0]; + U32 curr = (U32)(ip0 - base); + size_t step = stepSize; + const size_t kStepIncr = 1 << kSearchStrength; + const BYTE* nextStep = ip0 + kStepIncr; + + /* Inner search loop */ + while (1) { + const BYTE* match = base + matchIndex; + const U32 repIndex = curr + 1 - offset_1; + const BYTE* repMatch = (repIndex < prefixStartIndex) ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls); + size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; /* update hash table */ + + if (((U32) ((prefixStartIndex - 1) - repIndex) >= + 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ + && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
dictEnd : iend; + mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; + ip0++; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength); + break; + } + + if (dictTagsMatch) { + /* Found a possible dict match */ + const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS; + const BYTE* dictMatch = dictBase + dictMatchIndex; + if (dictMatchIndex > dictStartIndex && + MEM_read32(dictMatch) == MEM_read32(ip0)) { + /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */ + if (matchIndex <= prefixStartIndex) { + U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta); + mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4; + while (((ip0 > anchor) & (dictMatch > dictStart)) + && (ip0[-1] == dictMatch[-1])) { + ip0--; + dictMatch--; + mLength++; + } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; + } + } + } + + if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) { + /* found a regular match */ + U32 const offset = (U32) (ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (((ip0 > anchor) & (match > prefixStart)) + && (ip0[-1] == match[-1])) { + ip0--; + match--; + mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength); + break; } - } else if (MEM_read32(match) != MEM_read32(ip)) { - /* it's not a match, and we're not going to check the dictionary */ - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; - } else { - /* found a regular match */ - U32 const offset = (U32)(ip-match); - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - while (((ip>anchor) & (match>prefixStart)) - && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); - } + + /* Prepare for next iteration */ + dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS]; + dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1); + matchIndex = hashTable[hash1]; + + if (ip1 >= nextStep) { + step++; + nextStep += kStepIncr; + } + ip0 = ip1; + ip1 = ip1 + step; + if (ip1 > ilimit) goto _cleanup; + + curr = (U32)(ip0 - base); + hash0 = hash1; + } /* end inner search loop */ /* match found */ - ip += mLength; - anchor = ip; + assert(mLength); + ip0 += mLength; + anchor = ip0; - if (ip <= ilimit) { + if (ip0 <= ilimit) { /* Fill Table */ assert(base+curr+2 > istart); /* check base overflow */ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); + while (ip0 <= ilimit) { + U32 const current2 = (U32)(ip0-base); U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? 
dictBase - dictIndexDelta + repIndex2 : base + repIndex2; if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + && (MEM_read32(repMatch2) == MEM_read32(ip0))) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2; + ip0 += repLength2; + anchor = ip0; continue; } break; } } + + /* Prepare for next iteration */ + assert(ip0 == anchor); + ip1 = ip0 + stepSize; } +_cleanup: /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = offset_1; + rep[1] = offset_2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -553,11 +689,10 @@ U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ - U32 const stepSize = cParams->targetLength + !(cParams->targetLength); + size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; const BYTE* const base = ms->window.base; const BYTE* const dictBase = ms->window.dictBase; const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); @@ -570,6 +705,28 @@ const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; + + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 idx; /* match idx for ip0 */ + const BYTE* idxBase; /* base pointer for idx */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */ + + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); (void)hasStep; /* not currently specialized on whether it's accelerated */ @@ -579,75 +736,202 @@ if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t h = ZSTD_hashPtr(ip, hlog, mls); - const U32 matchIndex = hashTable[h]; - const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - const U32 curr = (U32)(ip-base); - const U32 repIndex = curr + 1 - offset_1; - const BYTE* const repBase = repIndex < prefixStartIndex ? 
dictBase : base; - const BYTE* const repMatch = repBase + repIndex; - hashTable[h] = curr; /* update hash table */ - DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); - - if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ - & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; - size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength); - ip += rLength; - anchor = ip; - } else { - if ( (matchIndex < dictStartIndex) || - (MEM_read32(match) != MEM_read32(ip)) ) { - assert(stepSize >= 1); - ip += ((ip-anchor) >> kSearchStrength) + stepSize; - continue; + { U32 const curr = (U32)(ip0 - base); + U32 const maxRep = curr - dictStartIndex; + if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0; + } + + /* start each op */ +_start: /* Requires: ip0 */ + + step = stepSize; + nextStep = ip0 + kStepIncr; + + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + + if (ip3 >= ilimit) { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + idx = hashTable[hash0]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + do { + { /* load repcode match for ip[2] */ + U32 const current2 = (U32)(ip2 - base); + U32 const repIndex = current2 - offset_1; + const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; + U32 rval; + if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */ + & (offset_1 > 0) ) { + rval = MEM_read32(repBase + repIndex); + } else { + rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */ } - { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; - const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; - U32 const offset = curr - matchIndex; - size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = offset; /* update offset history */ - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); - ip += mLength; - anchor = ip; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if (MEM_read32(ip2) == rval) { + ip0 = ip2; + match0 = repBase + repIndex; + matchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + assert((match0 != prefixStart) & (match0 != dictStart)); + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; + offcode = REPCODE1_TO_OFFBASE; + mLength += 4; + goto _match; } } - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; - hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? 
dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */ - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; - { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2); - hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + { /* load match for ip[0] */ + U32 const mval = idx >= dictStartIndex ? + MEM_read32(idxBase + idx) : + MEM_read32(ip0) ^ 1; /* guaranteed not to match */ + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } } + + /* lookup ip[1] */ + idx = hashTable[hash1]; + idxBase = idx < prefixStartIndex ? dictBase : base; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + + /* calculate step */ + if (ip2 >= nextStep) { + step++; + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + nextStep += kStepIncr; + } + } while (ip3 < ilimit); + +_cleanup: + /* Note that there are probably still a couple positions we could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ + + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; /* save reps for next block */ - rep[0] = offset_1; - rep[1] = offset_2; + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx, idxBase */ + + /* Compute the offset code. */ + { U32 const offset = current0 - idx; + const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart; + matchEnd = idx < prefixStartIndex ? dictEnd : iend; + match0 = idxBase + idx; + offset_2 = offset_1; + offset_1 = offset; + offcode = OFFSET_TO_OFFBASE(offset); + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } } + +_match: /* Requires: ip0, match0, offcode, matchEnd */ + + /* Count the forward length. 
*/ + assert(matchEnd != 0); + mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); + + ip0 += mLength; + anchor = ip0; + + /* write next hash table entry */ + if (ip1 < ip0) { + hashTable[hash1] = (U32)(ip1 - base); + } + + /* Fill table and check for immediate repcode. */ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + while (ip0 <= ilimit) { + U32 const repIndex2 = (U32)(ip0-base) - offset_2; + const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2); + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += repLength2; + anchor = ip0; + continue; + } + break; + } } + + goto _start; } ZSTD_GEN_FAST_FN(extDict, 4, 0) @@ -660,6 +944,7 @@ void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; + assert(ms->dictMatchState == NULL); switch(mls) { default: /* includes case 3 */ diff --git a/sys/contrib/zstd/lib/compress/zstd_lazy.h b/sys/contrib/zstd/lib/compress/zstd_lazy.h --- a/sys/contrib/zstd/lib/compress/zstd_lazy.h +++ b/sys/contrib/zstd/lib/compress/zstd_lazy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,6 +25,8 @@ */ #define ZSTD_LAZY_DDSS_BUCKET_LOG 2 +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ + U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); @@ -116,7 +118,7 @@ size_t ZSTD_compressBlock_btlazy2_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - + #if defined (__cplusplus) } diff --git a/sys/contrib/zstd/lib/compress/zstd_lazy.c b/sys/contrib/zstd/lib/compress/zstd_lazy.c --- a/sys/contrib/zstd/lib/compress/zstd_lazy.c +++ b/sys/contrib/zstd/lib/compress/zstd_lazy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -10,6 +10,9 @@ #include "zstd_compress_internal.h" #include "zstd_lazy.h" +#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */ + +#define kLazySkippingStep 8 /*-************************************* @@ -197,8 +200,8 @@ U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", - curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex); - bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); + curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex); + bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ @@ -218,7 +221,7 @@ } if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } @@ -230,7 +233,7 @@ static size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, + size_t* offBasePtr, U32 const mls, const ZSTD_dictMode_e dictMode) { @@ -327,8 +330,8 @@ if (matchLength > bestLength) { if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) - bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) ) + bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ if (dictMode == ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any @@ -361,16 +364,16 @@ if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, - offsetPtr, bestLength, nbCompares, + offBasePtr, bestLength, nbCompares, mls, dictMode); } assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { - U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; + U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", - curr, (U32)bestLength, (U32)*offsetPtr, mIndex); + curr, (U32)bestLength, (U32)*offBasePtr, mIndex); } return bestLength; } @@ -381,14 +384,14 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, + size_t* offBasePtr, const U32 mls /* template */, const ZSTD_dictMode_e 
dictMode) { DEBUGLOG(7, "ZSTD_BtFindBestMatch"); if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ ZSTD_updateDUBT(ms, ip, iLimit, mls); - return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); + return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode); } /*********************************** @@ -561,7 +564,7 @@ /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) { /* best possible, avoids read overflow on next attempt */ return ml; @@ -598,7 +601,7 @@ /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } @@ -617,7 +620,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( ZSTD_matchState_t* ms, const ZSTD_compressionParameters* const cParams, - const BYTE* ip, U32 const mls) + const BYTE* ip, U32 const mls, U32 const lazySkipping) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; @@ -632,6 +635,9 @@ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; + /* Stop inserting every position when in the lazy skipping mode. */ + if (lazySkipping) + break; } ms->nextToUpdate = target; @@ -640,7 +646,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0); } /* inlining is important to hardwire a hot branch (template emulation) */ @@ -684,14 +690,15 @@ } /* HC4 match finder */ - matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); + matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping); for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) { size_t currentMl=0; if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - if (match[ml] == ip[ml]) /* potentially better */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; @@ -703,7 +710,7 @@ /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - matchIndex); + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -739,7 +746,7 @@ if (currentMl > ml) { ml = currentMl; assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -756,8 +763,6 @@ * (SIMD) Row-based matchfinder ***********************************/ /* Constants for row-based hash */ -#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of 
hashes in the match state's tagTable from the beginning of a row */ -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) #define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ @@ -769,73 +774,19 @@ * Starting from the LSB, returns the idx of the next non-zero bit. * Basically counting the nb of trailing zeroes. */ -static U32 ZSTD_VecMask_next(ZSTD_VecMask val) { - assert(val != 0); -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, val); - return (U32)(r); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)))) - if (sizeof(size_t) == 4) { - U32 mostSignificantWord = (U32)(val >> 32); - U32 leastSignificantWord = (U32)val; - if (leastSignificantWord == 0) { - return 32 + (U32)__builtin_ctz(mostSignificantWord); - } else { - return (U32)__builtin_ctz(leastSignificantWord); - } - } else { - return (U32)__builtin_ctzll(val); - } -# else - /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count - * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer - */ - val = ~val & (val - 1ULL); /* Lowest set bit mask */ - val = val - ((val >> 1) & 0x5555555555555555); - val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); - return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56); -# endif -} - -/* ZSTD_rotateRight_*(): - * Rotates a bitfield to the right by "count" bits. - * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts - */ -FORCE_INLINE_TEMPLATE -U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { - assert(count < 64); - count &= 0x3F; /* for fickle pattern recognition */ - return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); -} - -FORCE_INLINE_TEMPLATE -U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { - assert(count < 32); - count &= 0x1F; /* for fickle pattern recognition */ - return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); -} - -FORCE_INLINE_TEMPLATE -U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { - assert(count < 16); - count &= 0x0F; /* for fickle pattern recognition */ - return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); +MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) { + return ZSTD_countTrailingZeros64(val); } /* ZSTD_row_nextIndex(): * Returns the next index to insert at within a tagTable row, and updates the "head" - * value to reflect the update. Essentially cycles backwards from [0, {entries per row}) + * value to reflect the update. Essentially cycles backwards from [1, {entries per row}) */ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) { - U32 const next = (*tagRow - 1) & rowMask; - *tagRow = (BYTE)next; - return next; + U32 next = (*tagRow-1) & rowMask; + next += (next == 0) ? rowMask : 0; /* skip first position */ + *tagRow = (BYTE)next; + return next; } /* ZSTD_isAligned(): @@ -849,7 +800,7 @@ /* ZSTD_row_prefetch(): * Performs prefetching for the hashTable and tagTable at a given row. 
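 * The row is selected by the upper bits of the hash, and the low
 * ZSTD_ROW_HASH_TAG_BITS bits become the 1-byte tag kept in the tagTable.
 * For example:
 *   hash   = ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
 *   relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;   /* row base, in entries */
 *   tag    = hash & ZSTD_ROW_HASH_TAG_MASK;                /* 8-bit tag for the entry */
 * so hashTable + relRow and tagTable + relRow address the same row in both tables.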
*/ -FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) { +FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) { PREFETCH_L1(hashTable + relRow); if (rowLog >= 5) { PREFETCH_L1(hashTable + relRow + 16); @@ -873,13 +824,13 @@ U32 idx, const BYTE* const iLimit) { U32 const* const hashTable = ms->hashTable; - U16 const* const tagTable = ms->tagTable; + BYTE const* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1); U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch); for (; idx < lim; ++idx) { - U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash; @@ -895,11 +846,12 @@ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. */ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, - U16 const* tagTable, BYTE const* base, + BYTE const* tagTable, BYTE const* base, U32 idx, U32 const hashLog, - U32 const rowLog, U32 const mls) + U32 const rowLog, U32 const mls, + U64 const hashSalt) { - U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; ZSTD_row_prefetch(hashTable, tagTable, row, rowLog); { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK]; @@ -917,22 +869,21 @@ U32 const rowMask, U32 const useCache) { U32* const hashTable = ms->hashTable; - U16* const tagTable = ms->tagTable; + BYTE* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; const BYTE* const base = ms->window.base; DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { - U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls) - : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt) + : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32* const row = hashTable + relRow; - BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte. 
- Explicit cast allows us to get exact desired position within each row */ + BYTE* tagRow = tagTable + relRow; U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); - ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK; + assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt)); + tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK; row[pos] = updateStartIdx; } } @@ -980,7 +931,35 @@ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog); - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */); + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */); +} + +/* Returns the mask width of bits group of which will be set to 1. Given not all + * architectures have easy movemask instruction, this helps to iterate over + * groups of bits easier and faster. + */ +FORCE_INLINE_TEMPLATE U32 +ZSTD_row_matchMaskGroupWidth(const U32 rowEntries) +{ + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + (void)rowEntries; +#if defined(ZSTD_ARCH_ARM_NEON) + /* NEON path only works for little endian */ + if (!MEM_isLittleEndian()) { + return 1; + } + if (rowEntries == 16) { + return 4; + } + if (rowEntries == 32) { + return 2; + } + if (rowEntries == 64) { + return 1; + } +#endif + return 1; } #if defined(ZSTD_ARCH_X86_SSE2) @@ -1003,71 +982,82 @@ } #endif -/* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches - * the hash at the nth position in a row of the tagTable. - * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield - * to match up with the actual layout of the entries within the hashTable */ +#if defined(ZSTD_ARCH_ARM_NEON) +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped) +{ + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + if (rowEntries == 16) { + /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits. + * After that groups of 4 bits represent the equalMask. We lower + * all bits except the highest in these groups by doing AND with + * 0x88 = 0b10001000. + */ + const uint8x16_t chunk = vld1q_u8(src); + const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); + const uint8x8_t res = vshrn_n_u16(equalMask, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0); + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull; + } else if (rowEntries == 32) { + /* Same idea as with rowEntries == 16 but doing AND with + * 0x55 = 0b01010101. 
+ */ + const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src); + const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); + const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6); + const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6); + const uint8x8_t res = vsli_n_u8(t0, t1, 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ; + return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull; + } else { /* rowEntries == 64 */ + const uint8x16x4_t chunk = vld4q_u8(src); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); + const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); + const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); + const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); + + const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); + const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); + const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); + const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); + const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); + return ZSTD_rotateRight_U64(matches, headGrouped); + } +} +#endif + +/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by + * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag" + * matches the hash at the nth position in a row of the tagTable. + * Each row is a circular buffer beginning at the value of "headGrouped". So we + * must rotate the "matches" bitfield to match up with the actual layout of the + * entries within the hashTable */ FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) +ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries) { - const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET; + const BYTE* const src = tagRow; assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8); #if defined(ZSTD_ARCH_X86_SSE2) - return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head); + return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped); #else /* SW or NEON-LE */ # if defined(ZSTD_ARCH_ARM_NEON) /* This NEON path only works for little endian - otherwise use SWAR below */ if (MEM_isLittleEndian()) { - if (rowEntries == 16) { - const uint8x16_t chunk = vld1q_u8(src); - const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); - const uint16x8_t t0 = vshlq_n_u16(equalMask, 7); - const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14)); - const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14)); - const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28)); - const U16 hi = (U16)vgetq_lane_u8(t3, 8); - const U16 lo = (U16)vgetq_lane_u8(t3, 0); - return ZSTD_rotateRight_U16((hi << 8) | lo, head); - } else if (rowEntries == 32) { - const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src); - const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); - const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); - const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag)); - const uint8x16_t equalMask1 = vceqq_u8(chunk1, 
vdupq_n_u8(tag)); - const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0)); - const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1)); - const uint8x8_t t0 = vreinterpret_u8_s8(pack0); - const uint8x8_t t1 = vreinterpret_u8_s8(pack1); - const uint8x8_t t2 = vsri_n_u8(t1, t0, 2); - const uint8x8x2_t t3 = vuzp_u8(t2, t0); - const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4); - const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0); - return ZSTD_rotateRight_U32(matches, head); - } else { /* rowEntries == 64 */ - const uint8x16x4_t chunk = vld4q_u8(src); - const uint8x16_t dup = vdupq_n_u8(tag); - const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); - const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); - const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); - const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); - - const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); - const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); - const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); - const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); - const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); - const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); - return ZSTD_rotateRight_U64(matches, head); - } + return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped); } # endif /* ZSTD_ARCH_ARM_NEON */ /* SWAR */ - { const size_t chunkSize = sizeof(size_t); + { const int chunkSize = sizeof(size_t); const size_t shiftAmount = ((chunkSize * 8) - chunkSize); const size_t xFF = ~((size_t)0); const size_t x01 = xFF / 0xFF; @@ -1100,11 +1090,11 @@ } matches = ~matches; if (rowEntries == 16) { - return ZSTD_rotateRight_U16((U16)matches, head); + return ZSTD_rotateRight_U16((U16)matches, headGrouped); } else if (rowEntries == 32) { - return ZSTD_rotateRight_U32((U32)matches, head); + return ZSTD_rotateRight_U32((U32)matches, headGrouped); } else { - return ZSTD_rotateRight_U64((U64)matches, head); + return ZSTD_rotateRight_U64((U64)matches, headGrouped); } } #endif @@ -1134,7 +1124,7 @@ const U32 rowLog) { U32* const hashTable = ms->hashTable; - U16* const tagTable = ms->tagTable; + BYTE* const tagTable = ms->tagTable; U32* const hashCache = ms->hashCache; const U32 hashLog = ms->rowHashLog; const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -1152,8 +1142,11 @@ const U32 rowEntries = (1U << rowLog); const U32 rowMask = rowEntries - 1; const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ + const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries); + const U64 hashSalt = ms->hashSalt; U32 nbAttempts = 1U << cappedSearchLog; size_t ml=4-1; + U32 hash; /* DMS/DDS variables that may be referenced laster */ const ZSTD_matchState_t* const dms = ms->dictMatchState; @@ -1177,7 +1170,7 @@ if (dictMode == ZSTD_dictMatchState) { /* Prefetch DMS rows */ U32* const dmsHashTable = dms->hashTable; - U16* const dmsTagTable = dms->tagTable; + BYTE* const dmsTagTable = dms->tagTable; U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls); U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK; @@ -1187,23 +1180,34 @@ } /* Update the hashTable and tagTable up to (but not including) ip */ - ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */); + if (!ms->lazySkipping) { + ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */); + hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, 
curr, hashLog, rowLog, mls, hashSalt); + } else { + /* Stop inserting every position when in the lazy skipping mode. + * The hash cache is also not kept up to date in this mode. + */ + hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt); + ms->nextToUpdate = curr; + } + ms->hashSaltEntropy += hash; /* collect salt entropy */ + { /* Get the hash for ip, compute the appropriate row */ - U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK; U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); - U32 const head = *tagRow & rowMask; + U32 const headGrouped = (*tagRow & rowMask) * groupWidth; U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries); + ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries); /* Cycle through the matches and prefetch */ - for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { - U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask; + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = row[matchPos]; + if(matchPos == 0) continue; assert(numMatches < rowEntries); if (matchIndex < lowLimit) break; @@ -1213,13 +1217,14 @@ PREFETCH_L1(dictBase + matchIndex); } matchBuffer[numMatches++] = matchIndex; + --nbAttempts; } /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop in ZSTD_row_update_internal() at the next search. 
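 * Note: the first byte of each tag row (*tagRow) holds the row's "head" counter
 * rather than a tag, and ZSTD_row_nextIndex() cycles within [1, rowEntries), so
 * entry 0 never holds a match candidate, which is why matchPos == 0 is skipped
 * in the loop above.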
*/ { U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag; + tagRow[pos] = (BYTE)tag; row[pos] = ms->nextToUpdate++; } @@ -1233,7 +1238,8 @@ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { const BYTE* const match = base + matchIndex; assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ - if (match[ml] == ip[ml]) /* potentially better */ + /* read 4B starting from (match + ml + 1 - sizeof(U32)) */ + if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */ currentMl = ZSTD_count(ip, match, iLimit); } else { const BYTE* const match = dictBase + matchIndex; @@ -1245,7 +1251,7 @@ /* Save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = STORE_OFFSET(curr - matchIndex); + *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } @@ -1263,19 +1269,21 @@ const U32 dmsSize = (U32)(dmsEnd - dmsBase); const U32 dmsIndexDelta = dictLimit - dmsSize; - { U32 const head = *dmsTagRow & rowMask; + { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth; U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; - ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries); + ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries); - for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) { - U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask; + for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) { + U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask; U32 const matchIndex = dmsRow[matchPos]; + if(matchPos == 0) continue; if (matchIndex < dmsLowestIndex) break; PREFETCH_L1(dmsBase + matchIndex); matchBuffer[numMatches++] = matchIndex; + --nbAttempts; } /* Return the longest match */ @@ -1294,7 +1302,7 @@ if (currentMl > ml) { ml = currentMl; assert(curr > matchIndex + dmsIndexDelta); - *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); + *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; } } @@ -1304,14 +1312,10 @@ } -typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - /** - * This struct contains the functions necessary for lazy to search. - * Currently, that is only searchMax. However, it is still valuable to have the - * VTable because this makes it easier to add more functions to the VTable later. + * Generate search functions templated on (dictMode, mls, rowLog). + * These functions are outlined for code size & compilation time. + * ZSTD_searchMax() dispatches to the correct implementation function. * * TODO: The start of the search function involves loading and calculating a * bunch of constants from the ZSTD_matchState_t. These computations could be @@ -1329,25 +1333,25 @@ * the single segment loop. It should go in searchMax instead of its own * function to avoid having multiple virtual function calls per search. 
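 * As a concrete example of the generation scheme below, GEN_ZSTD_ROW_SEARCH_FN(noDict, 5, 6)
 * defines an outlined function named ZSTD_RowFindBestMatch_noDict_5_6(), which simply
 * forwards to the ZSTD_RowFindBestMatch() template with mls=5, rowLog=6 and
 * dictMode=ZSTD_noDict baked in as compile-time constants.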
*/ -typedef struct { - searchMax_f searchMax; -} ZSTD_LazyVTable; -#define GEN_ZSTD_BT_VTABLE(dictMode, mls) \ - static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \ - ZSTD_matchState_t* ms, \ - const BYTE* ip, const BYTE* const iLimit, \ - size_t* offsetPtr) \ - { \ - assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ - return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ - } \ - static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \ - ZSTD_BtFindBestMatch_##dictMode##_##mls \ - }; +#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls +#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls +#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog -#define GEN_ZSTD_HC_VTABLE(dictMode, mls) \ - static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \ +#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE + +#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offBasePtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \ + } \ + +#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ ZSTD_matchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ @@ -1355,12 +1359,9 @@ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ } \ - static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \ - ZSTD_HcFindBestMatch_##dictMode##_##mls \ - }; -#define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \ - static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \ +#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ ZSTD_matchState_t* ms, \ const BYTE* ip, const BYTE* const iLimit, \ size_t* offsetPtr) \ @@ -1369,9 +1370,6 @@ assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ } \ - static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \ - ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \ - }; #define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ X(dictMode, mls, 4) \ @@ -1394,84 +1392,103 @@ X(__VA_ARGS__, dictMatchState) \ X(__VA_ARGS__, dedicatedDictSearch) -/* Generate Row VTables for each combination of (dictMode, mls, rowLog) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE) -/* Generate Binary Tree VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE) -/* Generate Hash Chain VTables for each combination of (dictMode, mls) */ -ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE) - -#define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_BtVTable_##dictMode##_4, \ - &ZSTD_BtVTable_##dictMode##_5, \ - &ZSTD_BtVTable_##dictMode##_6 \ - } +/* Generate row search fns for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN) +/* Generate binary Tree search fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN) +/* Generate hash chain search 
fns for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN) -#define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \ - { \ - &ZSTD_HcVTable_##dictMode##_4, \ - &ZSTD_HcVTable_##dictMode##_5, \ - &ZSTD_HcVTable_##dictMode##_6 \ - } - -#define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \ - { \ - &ZSTD_RowVTable_##dictMode##_##mls##_4, \ - &ZSTD_RowVTable_##dictMode##_##mls##_5, \ - &ZSTD_RowVTable_##dictMode##_##mls##_6 \ - } +typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; -#define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \ - { \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \ - GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \ +#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \ + case mls: \ + return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr); +#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + case rowLog: \ + return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr); + +#define ZSTD_SWITCH_MLS(X, dictMode) \ + switch (mls) { \ + ZSTD_FOR_EACH_MLS(X, dictMode) \ } -#define GEN_ZSTD_VTABLE_ARRAY(X) \ - { \ - X(noDict), \ - X(extDict), \ - X(dictMatchState), \ - X(dedicatedDictSearch) \ - } - -/* ******************************* -* Common parser - lazy strategy -*********************************/ -typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; +#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \ + case mls: \ + switch (rowLog) { \ + ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \ + } \ + ZSTD_UNREACHABLE; \ + break; + +#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \ + switch (searchMethod) { \ + case search_hashChain: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \ + break; \ + case search_binaryTree: \ + ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \ + break; \ + case search_rowHash: \ + ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \ + break; \ + } \ + ZSTD_UNREACHABLE; /** - * This table is indexed first by the four ZSTD_dictMode_e values, and then - * by the two searchMethod_e values. NULLs are placed for configurations - * that should never occur (extDict modes go to the other implementation - * below and there is no DDSS for binary tree search yet). + * Searches for the longest match at @p ip. + * Dispatches to the correct implementation function based on the + * (searchMethod, dictMode, mls, rowLog). We use switch statements + * here instead of using an indirect function call through a function + * pointer because after Spectre and Meltdown mitigations, indirect + * function calls can be very costly, especially in the kernel. + * + * NOTE: dictMode and searchMethod should be templated, so those switch + * statements should be optimized out. Only the mls & rowLog switches + * should be left. + * + * @param ms The match state. + * @param ip The position to search at. + * @param iend The end of the input data. + * @param[out] offsetPtr Stores the match offset into this pointer. + * @param mls The minimum search length, in the range [4, 6]. + * @param rowLog The row log (if applicable), in the range [4, 6]. + * @param searchMethod The search method to use (templated). + * @param dictMode The dictMode (templated). + * + * @returns The length of the longest match found, or < mls if no match is found. 
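 * For example, with searchMethod == search_rowHash and dictMode == ZSTD_noDict known
 * at compile time, the searchMethod/dictMode switches fold away entirely, and a call
 * with mls == 5 and rowLog == 6 at run time selects ZSTD_RowFindBestMatch_noDict_5_6()
 * through the two remaining switches.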
+ * If a match is found its offset is stored in @p offsetPtr. */ - -static ZSTD_LazyVTable const* -ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) +FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( + ZSTD_matchState_t* ms, + const BYTE* ip, + const BYTE* iend, + size_t* offsetPtr, + U32 const mls, + U32 const rowLog, + searchMethod_e const searchMethod, + ZSTD_dictMode_e const dictMode) { - /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */ - ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY); - ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY); - /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. */ - ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY); - - U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch)); - U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); - switch (searchMethod) { - case search_hashChain: - return hcVTables[dictMode][mls - 4]; - case search_binaryTree: - return btVTables[dictMode][mls - 4]; - case search_rowHash: - return rowVTables[dictMode][mls - 4][rowLog - 4]; - default: - return NULL; + if (dictMode == ZSTD_noDict) { + ZSTD_SWITCH_SEARCH_METHOD(noDict) + } else if (dictMode == ZSTD_extDict) { + ZSTD_SWITCH_SEARCH_METHOD(extDict) + } else if (dictMode == ZSTD_dictMatchState) { + ZSTD_SWITCH_SEARCH_METHOD(dictMatchState) + } else if (dictMode == ZSTD_dedicatedDictSearch) { + ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch) } + ZSTD_UNREACHABLE; + return 0; } +/* ******************************* +* Common parser - lazy strategy +*********************************/ + FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, @@ -1488,9 +1505,11 @@ const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax; - U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; + U32 offset_1 = rep[0], offset_2 = rep[1]; + U32 offsetSaved1 = 0, offsetSaved2 = 0; const int isDMS = dictMode == ZSTD_dictMatchState; const int isDDS = dictMode == ZSTD_dedicatedDictSearch; @@ -1505,16 +1524,14 @@ 0; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); - assert(searchMax != NULL); - DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod); ip += (dictAndPrefixLength == 0); if (dictMode == ZSTD_noDict) { U32 const curr = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; - if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0; } if (isDxS) { /* dictMatchState repCode checks don't currently handle repCode == 0 @@ -1523,11 +1540,11 @@ assert(offset_2 <= dictAndPrefixLength); } + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + if (searchMethod == search_rowHash) { - const U32 rowLog = 
MAX(4, MIN(6, ms->cParams.searchLog)); - ZSTD_row_fillHashCache(ms, base, rowLog, - MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), - ms->nextToUpdate, ilimit); + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); } /* Match Loop */ @@ -1539,7 +1556,7 @@ #endif while (ip < ilimit) { size_t matchLength=0; - size_t offcode=STORE_REPCODE_1; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; DEBUGLOG(7, "search baseline (depth 0)"); @@ -1564,14 +1581,23 @@ } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t offbaseFound = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode); if (ml2 > matchLength) - matchLength = ml2, start = ip, offcode=offsetFound; + matchLength = ml2, start = ip, offBase = offbaseFound; } if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */; + ip += step; + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios. This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } @@ -1581,12 +1607,12 @@ DEBUGLOG(7, "search depth 1"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1598,17 +1624,17 @@ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } @@ -1617,12 +1643,12 @@ DEBUGLOG(7, "search depth 2"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1634,17 +1660,17 @@ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; + matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip; } } - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); + { size_t ofbCandidate=999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ @@ -1655,26 +1681,33 @@ * notably if `value` is unsigned, resulting in a large positive `-value`. 
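 * For instance, with U32 value = 3, start[-value] evaluates to *(start + 0xFFFFFFFDu),
 * an address far beyond the buffer, rather than *(start - 3); this is why the catch-up
 * loops below decrement the pointers explicitly and only ever index with [-1].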
*/ /* catch up */ - if (STORED_IS_OFFSET(offcode)) { + if (OFFBASE_IS_OFFSET(offBase)) { if (dictMode == ZSTD_noDict) { - while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest)) - && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */ + while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest)) + && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } if (isDxS) { - U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } - offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ if (isDxS) { @@ -1688,8 +1721,8 @@ && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; @@ -1703,16 +1736,20 @@ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ } } } - /* Save reps for next block */ - rep[0] = offset_1 ? offset_1 : savedOffset; - rep[1] = offset_2 ? offset_2 : savedOffset; + /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0), + * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */ + offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2; + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved1; + rep[1] = offset_2 ? 
offset_2 : offsetSaved2; /* Return the last literals size */ return (size_t)(iend - anchor); @@ -1881,19 +1918,20 @@ const BYTE* const dictEnd = dictBase + dictLimit; const BYTE* const dictStart = dictBase + ms->window.lowLimit; const U32 windowLog = ms->cParams.windowLog; - const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; + const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); - searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); + /* Reset the lazy skipping state */ + ms->lazySkipping = 0; + /* init */ ip += (ip == prefixStart); if (searchMethod == search_rowHash) { - ZSTD_row_fillHashCache(ms, base, rowLog, - MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), - ms->nextToUpdate, ilimit); + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); } /* Match Loop */ @@ -1905,7 +1943,7 @@ #endif while (ip < ilimit) { size_t matchLength=0; - size_t offcode=STORE_REPCODE_1; + size_t offBase = REPCODE1_TO_OFFBASE; const BYTE* start=ip+1; U32 curr = (U32)(ip-base); @@ -1924,14 +1962,23 @@ } } /* first search (depth 0) */ - { size_t offsetFound = 999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); if (ml2 > matchLength) - matchLength = ml2, start = ip, offcode=offsetFound; + matchLength = ml2, start = ip, offBase = ofbCandidate; } if (matchLength < 4) { - ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + size_t const step = ((size_t)(ip-anchor) >> kSearchStrength); + ip += step + 1; /* jump faster over incompressible sections */ + /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time. + * In this mode we stop inserting every position into our tables, and only insert + * positions that we search, which is one in step positions. + * The exact cutoff is flexible, I've just chosen a number that is reasonably high, + * so we minimize the compression ratio loss in "normal" scenarios. This mode gets + * triggered once we've gone 2KB without finding any matches. + */ + ms->lazySkipping = step > kLazySkippingStep; continue; } @@ -1941,7 +1988,7 @@ ip ++; curr++; /* check repCode */ - if (offcode) { + if (offBase) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; @@ -1953,18 +2000,18 @@ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 1 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; /* search a better one */ } } @@ -1973,7 +2020,7 @@ ip ++; curr++; /* check repCode */ - if (offcode) { + if (offBase) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; @@ -1985,38 +2032,45 @@ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; + matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip; } } /* search match, depth 2 */ - { size_t offset2=999999999; - size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); + { size_t ofbCandidate = 999999999; + size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offcode = offset2, start = ip; + matchLength = ml2, offBase = ofbCandidate, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ - if (STORED_IS_OFFSET(offcode)) { - U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); + if (OFFBASE_IS_OFFSET(offBase)) { + U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ? 
dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); + offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase); } /* store sequence */ _storeSequence: { size_t const litLength = (size_t)(start - anchor); - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength); anchor = ip = start + matchLength; } + if (ms->lazySkipping) { + /* We've found a match, disable lazy skipping mode, and refill the hash cache. */ + if (searchMethod == search_rowHash) { + ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit); + } + ms->lazySkipping = 0; + } /* check immediate repcode */ while (ip <= ilimit) { @@ -2031,8 +2085,8 @@ /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); + offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -2098,7 +2152,6 @@ size_t ZSTD_compressBlock_lazy2_extDict_row( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) - { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2); } diff --git a/sys/contrib/zstd/lib/compress/zstd_ldm.h b/sys/contrib/zstd/lib/compress/zstd_ldm.h --- a/sys/contrib/zstd/lib/compress/zstd_ldm.h +++ b/sys/contrib/zstd/lib/compress/zstd_ldm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstd_ldm.c b/sys/contrib/zstd/lib/compress/zstd_ldm.c --- a/sys/contrib/zstd/lib/compress/zstd_ldm.c +++ b/sys/contrib/zstd/lib/compress/zstd_ldm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -242,11 +242,11 @@ switch(ms->cParams.strategy) { case ZSTD_fast: - ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); + ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); break; case ZSTD_dfast: - ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); + ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx); break; case ZSTD_greedy: @@ -549,7 +549,7 @@ * the window through early invalidation. * TODO: * Test the chunk size. * * Try invalidation after the sequence generation and test the - * the offset against maxDist directly. + * offset against maxDist directly. 
* * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may @@ -711,7 +711,7 @@ rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, - STORE_OFFSET(sequence.offset), + OFFSET_TO_OFFBASE(sequence.offset), sequence.matchLength); ip += sequence.matchLength; } diff --git a/sys/contrib/zstd/lib/compress/zstd_ldm_geartab.h b/sys/contrib/zstd/lib/compress/zstd_ldm_geartab.h --- a/sys/contrib/zstd/lib/compress/zstd_ldm_geartab.h +++ b/sys/contrib/zstd/lib/compress/zstd_ldm_geartab.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstd_opt.h b/sys/contrib/zstd/lib/compress/zstd_opt.h --- a/sys/contrib/zstd/lib/compress/zstd_opt.h +++ b/sys/contrib/zstd/lib/compress/zstd_opt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstd_opt.c b/sys/contrib/zstd/lib/compress/zstd_opt.c --- a/sys/contrib/zstd/lib/compress/zstd_opt.c +++ b/sys/contrib/zstd/lib/compress/zstd_opt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,7 +16,7 @@ #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ #define ZSTD_MAX_PRICE (1<<30) -#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ +#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ /*-************************************* @@ -26,27 +26,35 @@ #if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat)) +# define WEIGHT(stat, opt) ((void)(opt), ZSTD_bitWeight(stat)) #elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) +# define WEIGHT(stat,opt) ((void)(opt), ZSTD_fracWeight(stat)) #else /* opt==approx, ultra==accurate */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) +# define WEIGHT(stat,opt) ((opt) ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) #endif +/* ZSTD_bitWeight() : + * provide estimated "cost" of a stat in full bits only */ MEM_STATIC U32 ZSTD_bitWeight(U32 stat) { return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); } +/* ZSTD_fracWeight() : + * provide fractional-bit "cost" of a stat, + * using linear interpolation approximation */ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) { U32 const stat = rawStat + 1; U32 const hb = ZSTD_highbit32(stat); U32 const BWeight = hb * BITCOST_MULTIPLIER; + /* Fweight was meant for "Fractional weight" + * but it's effectively a value between 1 and 2 + * using fixed point arithmetic */ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; U32 const weight = BWeight + FWeight; assert(hb + BITCOST_ACCURACY < 31); @@ -57,7 +65,7 @@ /* debugging function, * @return price in bytes as fractional value * for debug messages only */ -MEM_STATIC double ZSTD_fCost(U32 price) +MEM_STATIC double ZSTD_fCost(int price) { return (double)price / (BITCOST_MULTIPLIER*8); } @@ -88,20 +96,26 @@ return total; } -static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift) +typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e; + +static U32 +ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1) { U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", + (unsigned)lastEltIndex+1, (unsigned)shift ); assert(shift < 30); for (s=0; s> shift); - sum += table[s]; + unsigned const base = base1 ? 1 : (table[s]>0); + unsigned const newStat = base + (table[s] >> shift); + sum += newStat; + table[s] = newStat; } return sum; } /* ZSTD_scaleStats() : - * reduce all elements in table is sum too large + * reduce all elt frequencies in table if sum too large * return the resulting sum of elements */ static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) { @@ -110,7 +124,7 @@ DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); assert(logTarget < 30); if (factor <= 1) return prevsum; - return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor)); + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed); } /* ZSTD_rescaleFreqs() : @@ -129,18 +143,22 @@ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); optPtr->priceType = zop_dynamic; - if (optPtr->litLengthSum == 0) { /* first block : init */ - if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ - DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); + if (optPtr->litLengthSum == 0) { /* no literals stats collected -> first block assumed -> init */ + + /* heuristic: use pre-defined stats for too small inputs */ + if (srcSize <= ZSTD_PREDEF_THRESHOLD) { + DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD); optPtr->priceType = zop_predef; } assert(optPtr->symbolCosts != NULL); if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { - /* huffman table presumed generated by dictionary */ + + /* huffman stats covering the full value set : table presumed generated by dictionary */ optPtr->priceType = zop_dynamic; if (compressedLiterals) { + /* generate literals statistics from huffman table */ unsigned lit; assert(optPtr->litFreq != NULL); optPtr->litSum = 0; @@ -188,13 +206,14 @@ optPtr->offCodeSum += optPtr->offCodeFreq[of]; } } - } else { /* not a dictionary */ + } else { /* 
first block, no dictionary */ assert(optPtr->litFreq != NULL); if (compressedLiterals) { + /* base initial cost of literals on direct frequency within src */ unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8, base_0possible); } { unsigned const baseLLfreqs[MaxLL+1] = { @@ -224,10 +243,9 @@ optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - } - } else { /* new block : re-use previous statistics, scaled down */ + } else { /* new block : scale down accumulated statistics */ if (compressedLiterals) optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); @@ -255,11 +273,14 @@ return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ /* dynamic statistics */ - { U32 price = litLength * optPtr->litSumBasePrice; + { U32 price = optPtr->litSumBasePrice * litLength; + U32 const litPriceMax = optPtr->litSumBasePrice - BITCOST_MULTIPLIER; U32 u; + assert(optPtr->litSumBasePrice >= BITCOST_MULTIPLIER); for (u=0; u < litLength; u++) { - assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ - price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); + U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel); + if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax; + price -= litPrice; } return price; } @@ -272,10 +293,11 @@ assert(litLength <= ZSTD_BLOCKSIZE_MAX); if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); - /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX - * because it isn't representable in the zstd format. So instead just - * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block - * would be all literals. + + /* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX + * because it isn't representable in the zstd format. + * So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. + * In such a case, the block would be all literals. */ if (litLength == ZSTD_BLOCKSIZE_MAX) return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); @@ -289,24 +311,25 @@ } /* ZSTD_getMatchPrice() : - * Provides the cost of the match part (offset + matchLength) of a sequence + * Provides the cost of the match part (offset + matchLength) of a sequence. * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
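 *
 * (Editorial note, illustrative only, not part of the upstream patch:) the offBase
 * "sumtype" referenced below is assumed to follow the convention of the OFFBASE_*
 * macros used elsewhere in this patch: values 1..ZSTD_REP_NUM denote repcodes
 * (REPCODE_TO_OFFBASE(r) == r, "value between 1 and 3"), while larger values denote
 * a real offset shifted up (OFFSET_TO_OFFBASE(o) assumed == o + ZSTD_REP_NUM).
 * A hypothetical consumer of such a value would branch along these lines:
 *     if (OFFBASE_IS_OFFSET(offBase)) offset = OFFBASE_TO_OFFSET(offBase);  /* real match offset */
 *     else                            repIdx = offBase;                     /* repcode index 1..3 */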
- * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2 + * @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq() * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ FORCE_INLINE_TEMPLATE U32 -ZSTD_getMatchPrice(U32 const offcode, +ZSTD_getMatchPrice(U32 const offBase, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; - U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode)); + U32 const offCode = ZSTD_highbit32(offBase); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); - if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ - return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); + if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */ + return WEIGHT(mlBase, optLevel) + + ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */ /* dynamic statistics */ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); @@ -325,10 +348,10 @@ } /* ZSTD_updateStats() : - * assumption : literals + litLengtn <= iend */ + * assumption : literals + litLength <= iend */ static void ZSTD_updateStats(optState_t* const optPtr, U32 litLength, const BYTE* literals, - U32 offsetCode, U32 matchLength) + U32 offBase, U32 matchLength) { /* literals */ if (ZSTD_compressedLiterals(optPtr)) { @@ -344,8 +367,8 @@ optPtr->litLengthSum++; } - /* offset code : expected to follow storeSeq() numeric representation */ - { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode)); + /* offset code : follows storeSeq() numeric representation */ + { U32 const offCode = ZSTD_highbit32(offBase); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; @@ -552,16 +575,17 @@ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } -FORCE_INLINE_TEMPLATE -U32 ZSTD_insertBtAndGetAllMatches ( - ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */ - const U32 lengthToBeat, - U32 const mls /* template */) +FORCE_INLINE_TEMPLATE U32 +ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ + ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, + const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + const U32 ll0, /* tells if associated literal length is 0 or not. 
This value must be 0 or 1 */ + const U32 lengthToBeat, + const U32 mls /* template */) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); @@ -644,7 +668,7 @@ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; - matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */ + matches[mnum].off = REPCODE_TO_OFFBASE(repCode - ll0 + 1); /* expect value between 1 and 3 */ matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > sufficient_len) @@ -673,7 +697,7 @@ bestLength = mlen; assert(curr > matchIndex3); assert(mnum==0); /* no prior solution */ - matches[0].off = STORE_OFFSET(curr - matchIndex3); + matches[0].off = OFFSET_TO_OFFBASE(curr - matchIndex3); matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > sufficient_len) | @@ -706,13 +730,13 @@ } if (matchLength > bestLength) { - DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); + DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = STORE_OFFSET(curr - matchIndex); + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) @@ -754,12 +778,12 @@ if (matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; - DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); + DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)", + (U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex)); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = STORE_OFFSET(curr - matchIndex); + matches[mnum].off = OFFSET_TO_OFFBASE(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) @@ -960,7 +984,7 @@ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) { U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; - /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */ + /* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */ U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; /* Ensure that current block position is not outside of the match */ @@ -971,11 +995,11 @@ } if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { - U32 const candidateOffCode = STORE_OFFSET(optLdm->offset); - DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u", - candidateOffCode, candidateMatchLength, currPosInBlock); + U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset); + DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u", + candidateOffBase, candidateMatchLength, currPosInBlock); matches[*nbMatches].len = candidateMatchLength; - matches[*nbMatches].off = candidateOffCode; + matches[*nbMatches].off = 
candidateOffBase; (*nbMatches)++; } } @@ -1062,6 +1086,8 @@ ZSTD_optimal_t lastSequence; ZSTD_optLdm_t optLdm; + ZSTD_memset(&lastSequence, 0, sizeof(ZSTD_optimal_t)); + optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore; optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); @@ -1098,14 +1124,14 @@ /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; - U32 const maxOffcode = matches[nbMatches-1].off; - DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", - nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart)); + U32 const maxOffBase = matches[nbMatches-1].off; + DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series", + nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { lastSequence.litlen = litlen; lastSequence.mlen = maxML; - lastSequence.off = maxOffcode; + lastSequence.off = maxOffBase; DEBUGLOG(6, "large match (%u>%u), immediate encoding", maxML, sufficient_len); cur = 0; @@ -1122,15 +1148,15 @@ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ } for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offcode = matches[matchNb].off; + U32 const offBase = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { - U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel); + U32 const matchPrice = ZSTD_getMatchPrice(offBase, pos, optStatePtr, optLevel); U32 const sequencePrice = literalsPrice + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", - pos, ZSTD_fCost(sequencePrice)); + pos, ZSTD_fCost((int)sequencePrice)); opt[pos].mlen = pos; - opt[pos].off = offcode; + opt[pos].off = offBase; opt[pos].litlen = litlen; opt[pos].price = (int)sequencePrice; } } @@ -1230,7 +1256,7 @@ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; U32 mlen; - DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", + DEBUGLOG(7, "testing match %u => offBase=%4u, mlen=%2u, llen=%2u", matchNb, matches[matchNb].off, lastML, litlen); for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ @@ -1296,7 +1322,7 @@ for (storePos=storeStart; storePos <= storeEnd; storePos++) { U32 const llen = opt[storePos].litlen; U32 const mlen = opt[storePos].mlen; - U32 const offCode = opt[storePos].off; + U32 const offBase = opt[storePos].off; U32 const advance = llen + mlen; DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", anchor - istart, (unsigned)llen, (unsigned)mlen); @@ -1308,8 +1334,8 @@ } assert(anchor + llen <= iend); - ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen); + ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen); anchor += advance; ip = anchor; } } @@ -1349,7 +1375,7 @@ /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. * only works on first block, with no dictionary and no ldm. - * this function cannot error, hence its contract must be respected. + * this function cannot error out, its narrow contract must be respected. 
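 *
 * (Editorial sketch, not part of the upstream patch:) as far as can be inferred from
 * ZSTD_compressBlock_btultra2() below, the intended usage is roughly:
 *     if (firstBlock && noLdm && noDict && startOfFrame && srcSize > ZSTD_PREDEF_THRESHOLD)
 *         ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);  /* throw-away pass, seeds ms->opt */
 *     /* ... the real optimal-parser pass then runs on the same input, reusing the seeded stats */
 * where firstBlock/noLdm/noDict/startOfFrame are hypothetical shorthand for the exact
 * window and seqStore checks spelled out in btultra2; only the first pass goes through
 * this function.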
*/ static void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, @@ -1368,7 +1394,7 @@ ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ - /* invalidate first scan from history */ + /* invalidate first scan from history, only keep entropy stats */ ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; @@ -1392,20 +1418,20 @@ U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); - /* 2-pass strategy: + /* 2-passes strategy: * this strategy makes a first pass over first block to collect statistics - * and seed next round's statistics with it. - * After 1st pass, function forgets everything, and starts a new block. + * in order to seed next round's statistics with it. + * After 1st pass, function forgets history, and starts a new block. * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing. * The compression ratio gain is generally small (~0.5% on first block), - * the cost is 2x cpu time on first block. */ + ** the cost is 2x cpu time on first block. */ assert(srcSize <= ZSTD_BLOCKSIZE_MAX); if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ - && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ - && (srcSize > ZSTD_PREDEF_THRESHOLD) + && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ + && (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */ ) { ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } diff --git a/sys/contrib/zstd/lib/compress/zstdmt_compress.h b/sys/contrib/zstd/lib/compress/zstdmt_compress.h --- a/sys/contrib/zstd/lib/compress/zstdmt_compress.h +++ b/sys/contrib/zstd/lib/compress/zstdmt_compress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/compress/zstdmt_compress.c b/sys/contrib/zstd/lib/compress/zstdmt_compress.c --- a/sys/contrib/zstd/lib/compress/zstdmt_compress.c +++ b/sys/contrib/zstd/lib/compress/zstdmt_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -20,6 +20,7 @@ /* ====== Dependencies ====== */ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */ #include "../common/mem.h" /* MEM_STATIC */ #include "../common/pool.h" /* threadpool */ @@ -266,11 +267,11 @@ * 1 buffer for input loading * 1 buffer for "next input" when submitting current one * 1 buffer stuck in queue */ -#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3 +#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) /* After a worker releases its rawSeqStore, it is immediately ready for reuse. * So we only need one seq buffer per worker. 
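 *
 * (Editorial note:) the parentheses added around these pool-size macro bodies are the
 * usual macro-hygiene fix: in a hypothetical use such as
 *     size_t const poolBytes = BUF_POOL_MAX_NB_BUFFERS(nbWorkers) * bufSize;
 * the old body would expand to 2*nbWorkers + 3 * bufSize (operator-precedence bug),
 * whereas the new (2*(nbWorkers) + 3) keeps the whole buffer count multiplied by bufSize.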
*/ -#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers +#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) /* ===== Seq Pool Wrapper ====== */ @@ -719,7 +720,7 @@ ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ - size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); if (ZSTD_isError(hSize)) JOB_ERROR(hSize); DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); ZSTD_invalidateRepCodes(cctx); @@ -737,7 +738,7 @@ DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); assert(job->cSize == 0); for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { - size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); + size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); ip += chunkSize; op += cSize; assert(op < oend); @@ -757,8 +758,8 @@ size_t const lastBlockSize1 = job->src.size & (chunkSize-1); size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; size_t const cSize = (job->lastJob) ? - ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : - ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); + ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) : + ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize); if (ZSTD_isError(cSize)) JOB_ERROR(cSize); lastCBlockSize = cSize; } } @@ -1734,7 +1735,7 @@ } } else { /* We have enough bytes buffered to initialize the hash, - * and are have processed enough bytes to find a sync point. + * and have processed enough bytes to find a sync point. * Start scanning at the beginning of the input. */ assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); @@ -1761,17 +1762,24 @@ * then a block will be emitted anyways, but this is okay, since if we * are already synchronized we will remain synchronized. */ + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); + /* This assert is very expensive, and Debian compiles with asserts enabled. + * So disable it for now. We can get similar coverage by checking it at the + * beginning & end of the loop. 
+ * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); + */ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; + ++pos; /* for assert */ break; } } + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); return syncPoint; } diff --git a/sys/contrib/zstd/lib/decompress/huf_decompress.c b/sys/contrib/zstd/lib/decompress/huf_decompress.c --- a/sys/contrib/zstd/lib/decompress/huf_decompress.c +++ b/sys/contrib/zstd/lib/decompress/huf_decompress.c @@ -1,7 +1,7 @@ /* ****************************************************************** * huff0 huffman decoder, * part of Finite State Entropy library - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * * You can contact the author at : * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy @@ -19,10 +19,10 @@ #include "../common/compiler.h" #include "../common/bitstream.h" /* BIT_* */ #include "../common/fse.h" /* to compress headers */ -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" #include "../common/zstd_internal.h" +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_countTrailingZeros64 */ /* ************************************************************** * Constants @@ -43,10 +43,14 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" #endif -#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2 -# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is + * supported at runtime, so we can add the BMI2 target attribute. + * When it is disabled, we will still get BMI2 if it is enabled statically. 
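 *
 * (Editorial note, assuming GCC/Clang target-attribute semantics:) in practice this
 * lets a fast loop be declared as
 *     static HUF_FAST_BMI2_ATTRS void fastLoop(HUF_DecompressFastArgs* args) { ... }
 * so that this one function may use BMI2 instructions while the rest of the file is
 * compiled for the baseline ISA; callers are expected to reach it only after the
 * runtime bmi2 check carried in the flags parameter.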
+ */ +#if DYNAMIC_BMI2 +# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE #else -# define HUF_ASM_X86_64_BMI2_ATTRS +# define HUF_FAST_BMI2_ATTRS #endif #ifdef __cplusplus @@ -56,18 +60,12 @@ #endif #define HUF_ASM_DECL HUF_EXTERN_C -#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +#if DYNAMIC_BMI2 # define HUF_NEED_BMI2_FUNCTION 1 #else # define HUF_NEED_BMI2_FUNCTION 0 #endif -#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) -# define HUF_NEED_DEFAULT_FUNCTION 1 -#else -# define HUF_NEED_DEFAULT_FUNCTION 0 -#endif - /* ************************************************************** * Error Management ****************************************************************/ @@ -84,6 +82,11 @@ /* ************************************************************** * BMI2 Variant Wrappers ****************************************************************/ +typedef size_t (*HUF_DecompressUsingDTableFn)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); + #if DYNAMIC_BMI2 #define HUF_DGEN(fn) \ @@ -105,9 +108,9 @@ } \ \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - if (bmi2) { \ + if (flags & HUF_flags_bmi2) { \ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ @@ -117,9 +120,9 @@ #define HUF_DGEN(fn) \ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + size_t cSrcSize, HUF_DTable const* DTable, int flags) \ { \ - (void)bmi2; \ + (void)flags; \ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } @@ -138,15 +141,28 @@ return dtd; } -#if ZSTD_ENABLE_ASM_X86_64_BMI2 - -static size_t HUF_initDStream(BYTE const* ip) { +static size_t HUF_initFastDStream(BYTE const* ip) { BYTE const lastByte = ip[7]; - size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0; size_t const value = MEM_readLEST(ip) | 1; assert(bitsConsumed <= 8); + assert(sizeof(size_t) == 8); return value << bitsConsumed; } + + +/** + * The input/output arguments to the Huffman fast decoding loop: + * + * ip [in/out] - The input pointers, must be updated to reflect what is consumed. + * op [in/out] - The output pointers, must be updated to reflect what is written. + * bits [in/out] - The bitstream containers, must be updated to reflect the current state. + * dt [in] - The decoding table. + * ilimit [in] - The input limit, stop when any input pointer is below ilimit. + * oend [in] - The end of the output stream. op[3] must not cross oend. + * iend [in] - The end of each input stream. ip[i] may cross iend[i], + * as long as it is above ilimit, but that indicates corruption. + */ typedef struct { BYTE const* ip[4]; BYTE* op[4]; @@ -155,15 +171,17 @@ BYTE const* ilimit; BYTE* oend; BYTE const* iend[4]; -} HUF_DecompressAsmArgs; +} HUF_DecompressFastArgs; + +typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*); /** - * Initializes args for the asm decoding loop. - * @returns 0 on success - * 1 if the fallback implementation should be used. + * Initializes args for the fast decoding loop. + * @returns 1 on success + * 0 if the fallback implementation should be used. * Or an error code on failure. 
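 *
 * (Editorial sketch of the expected caller pattern, mirroring
 * HUF_decompress4X1_usingDTable_internal_fast() further down:)
 *     HUF_DecompressFastArgs args;
 *     size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
 *     FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
 *     if (ret == 0) return 0;   /* tell the caller to use the fallback decoder instead */
 *     loopFn(&args);            /* run the asm or C fast loop */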
*/ -static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) { void const* dt = DTable + 1; U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; @@ -172,9 +190,11 @@ BYTE* const oend = (BYTE*)dst + dstSize; - /* The following condition is false on x32 platform, - * but HUF_asm is not compatible with this ABI */ - if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1; + /* The fast decoding loop assumes 64-bit little-endian. + * This condition is false on x32. + */ + if (!MEM_isLittleEndian() || MEM_32bits()) + return 0; /* strict minimum : jump table + 1 byte per stream */ if (srcSize < 10) @@ -185,7 +205,7 @@ * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. */ if (dtLog != HUF_DECODER_FAST_TABLELOG) - return 1; + return 0; /* Read the jump table. */ { @@ -199,13 +219,13 @@ args->iend[2] = args->iend[1] + length2; args->iend[3] = args->iend[2] + length3; - /* HUF_initDStream() requires this, and this small of an input + /* HUF_initFastDStream() requires this, and this small of an input * won't benefit from the ASM loop anyways. * length1 must be >= 16 so that ip[0] >= ilimit before the loop * starts. */ if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8) - return 1; + return 0; if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ } /* ip[] contains the position that is currently loaded into bits[]. */ @@ -222,7 +242,7 @@ /* No point to call the ASM loop for tiny outputs. */ if (args->op[3] >= oend) - return 1; + return 0; /* bits[] is the bit container. * It is read from the MSB down to the LSB. @@ -231,10 +251,10 @@ * set, so that CountTrailingZeros(bits[]) can be used * to count how many bits we've consumed. */ - args->bits[0] = HUF_initDStream(args->ip[0]); - args->bits[1] = HUF_initDStream(args->ip[1]); - args->bits[2] = HUF_initDStream(args->ip[2]); - args->bits[3] = HUF_initDStream(args->ip[3]); + args->bits[0] = HUF_initFastDStream(args->ip[0]); + args->bits[1] = HUF_initFastDStream(args->ip[1]); + args->bits[2] = HUF_initFastDStream(args->ip[2]); + args->bits[3] = HUF_initFastDStream(args->ip[3]); /* If ip[] >= ilimit, it is guaranteed to be safe to * reload bits[]. It may be beyond its section, but is @@ -245,10 +265,10 @@ args->oend = oend; args->dt = dt; - return 0; + return 1; } -static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd) +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd) { /* Validate that we haven't overwritten. */ if (args->op[stream] > segmentEnd) @@ -262,15 +282,15 @@ return ERROR(corruption_detected); /* Construct the BIT_DStream_t. 
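 *
 * (Editorial note:) args->bits[stream] carries a sentinel bit: HUF_initFastDStream()
 * ORs a 1 into the lowest bit before shifting, and the fast loop only ever shifts the
 * container left as symbols are decoded, so the number of trailing zero bits equals
 * the number of bits consumed since the last refill. That is why the code below can
 * recover bitsConsumed with a single ZSTD_countTrailingZeros64(args->bits[stream]).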
*/ - bit->bitContainer = MEM_readLE64(args->ip[stream]); - bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]); + assert(sizeof(size_t) == 8); + bit->bitContainer = MEM_readLEST(args->ip[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]); bit->start = (const char*)args->iend[0]; bit->limitPtr = bit->start + sizeof(size_t); bit->ptr = (const char*)args->ip[stream]; return 0; } -#endif #ifndef HUF_FORCE_DECOMPRESS_X2 @@ -287,10 +307,11 @@ static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { - D4 = (symbol << 8) + nbBits; + D4 = (U64)((symbol << 8) + nbBits); } else { - D4 = symbol + (nbBits << 8); + D4 = (U64)(symbol + (nbBits << 8)); } + assert(D4 < (1U << 16)); D4 *= 0x0001000100010001ULL; return D4; } @@ -333,13 +354,7 @@ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; } HUF_ReadDTableX1_Workspace; - -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags) { U32 tableLog = 0; U32 nbSymbols = 0; @@ -354,7 +369,7 @@ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ - iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags); if (HUF_isError(iSize)) return iSize; @@ -381,9 +396,8 @@ * rankStart[0] is not filled because there are no entries in the table for * weight 0. */ - { - int n; - int nextRankStart = 0; + { int n; + U32 nextRankStart = 0; int const unroll = 4; int const nLimit = (int)nbSymbols - unroll + 1; for (n=0; n<(int)tableLog+1; n++) { @@ -410,10 +424,9 @@ * We can switch based on the length to a different inner loop which is * optimized for that particular case. 
*/ - { - U32 w; - int symbol=wksp->rankVal[0]; - int rankStart=0; + { U32 w; + int symbol = wksp->rankVal[0]; + int rankStart = 0; for (w=1; wrankVal[w]; int const length = (1 << w) >> 1; @@ -523,7 +536,7 @@ while (p < pEnd) HUF_DECODE_SYMBOLX1_0(p, bitDPtr); - return pEnd-pStart; + return (size_t)(pEnd-pStart); } FORCE_INLINE_TEMPLATE size_t @@ -549,6 +562,10 @@ return dstSize; } +/* HUF_decompress4X1_usingDTable_internal_body(): + * Conditions : + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X1_usingDTable_internal_body( void* dst, size_t dstSize, @@ -592,6 +609,7 @@ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -654,38 +672,142 @@ } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; -static HUF_ASM_X86_64_BMI2_ATTRS +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + U16 const* const dtable = (U16 const*)args->dt; + BYTE* const oend = args->oend; + BYTE const* const ilimit = args->ilimit; + + /* Copy the arguments to local variables */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + int symbol; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= (stream == 3 ? oend : op[stream + 1])); + assert(ip[stream] >= ilimit); + } +#endif + /* Compute olimit */ + { + /* Each iteration produces 5 output symbols per stream */ + size_t const oiters = (size_t)(oend - op[3]) / 5; + /* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes + * per stream. + */ + size_t const iiters = (size_t)(ip[0] - ilimit) / 7; + /* We can safely run iters iterations before running bounds checks */ + size_t const iters = MIN(oiters, iiters); + size_t const symbols = iters * 5; + + /* We can simply check that op[3] < olimit, instead of checking all + * of our bounds, since we can't hit the other bounds until we've run + * iters iterations, which only happens when op[3] == olimit. + */ + olimit = op[3] + symbols; + + /* Exit fast decoding loop once we get close to the end. */ + if (op[3] + 20 > olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. 
+ */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + + do { + /* Decode 5 symbols in each of the 4 streams */ + for (symbol = 0; symbol < 5; ++symbol) { + for (stream = 0; stream < 4; ++stream) { + int const index = (int)(bits[stream] >> 53); + int const entry = (int)dtable[index]; + bits[stream] <<= (entry & 63); + op[stream][symbol] = (BYTE)((entry >> 8) & 0xFF); + } + } + /* Reload the bitstreams */ + for (stream = 0; stream < 4; ++stream) { + int const ctz = ZSTD_countTrailingZeros64(bits[stream]); + int const nbBits = ctz & 7; + int const nbBytes = ctz >> 3; + op[stream] += 5; + ip[stream] -= nbBytes; + bits[stream] = MEM_read64(ip[stream]) | 1; + bits[stream] <<= nbBits; + } + } while (op[3] < olimit); + } + +_out: + + /* Save the final values of each of the state variables back to args. */ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + +/** + * @returns @p dstSize on success (>= 6) + * 0 if the fallback implementation should be used + * An error if an error occurred + */ +static HUF_FAST_BMI2_ATTRS size_t -HUF_decompress4X1_usingDTable_internal_bmi2_asm( +HUF_decompress4X1_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; const BYTE* const iend = (const BYTE*)cSrc + 6; BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; - { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); - FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + HUF_DecompressFastArgs args; + { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); + if (ret == 0) + return 0; } assert(args.ip[0] >= args.ilimit); - HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args); + loopFn(&args); /* Our loop guarantees that ip[] >= ilimit and that we haven't * overwritten any op[]. @@ -698,8 +820,7 @@ (void)iend; /* finish bit streams one by one. 
*/ - { - size_t const segmentSize = (dstSize+3) / 4; + { size_t const segmentSize = (dstSize+3) / 4; BYTE* segmentEnd = (BYTE*)dst; int i; for (i = 0; i < 4; ++i) { @@ -716,97 +837,59 @@ } /* decoded size */ + assert(dstSize != 0); return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ - -typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, - const void *cSrc, - size_t cSrcSize, - const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop; + } #endif -} - -size_t HUF_decompress1X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - const BYTE* ip = (const BYTE*) cSrc; - - size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); - if (HUF_isError(hSize)) return hSize; - if (hSize >= cSrcSize) return ERROR(srcSize_wrong); - ip += hSize; cSrcSize -= hSize; - - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X1_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + if (!(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t 
wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); -} - -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } - #endif /* HUF_FORCE_DECOMPRESS_X2 */ @@ -989,7 +1072,7 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32* rankStart, rankValCol_t* rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32* const rankVal = rankValOrigin[0]; @@ -1044,14 +1127,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - -size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, - const void* src, size_t srcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); @@ -1073,7 +1149,7 @@ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags); if (HUF_isError(iSize)) return iSize; /* check result */ @@ -1244,6 +1320,11 @@ /* decoded size */ return dstSize; } + +/* HUF_decompress4X2_usingDTable_internal_body(): + * Conditions: + * @dstSize >= 6 + */ FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -1284,8 +1365,9 @@ DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ + if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); @@ -1370,36 +1452,177 @@ } #endif -#if HUF_NEED_DEFAULT_FUNCTION static size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, size_t cSrcSize, HUF_DTable const* DTable) { return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); } -#endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 -HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN; + +#endif + +static HUF_FAST_BMI2_ATTRS +void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args) +{ + U64 bits[4]; + BYTE const* ip[4]; + BYTE* op[4]; + BYTE* oend[4]; + HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt; + BYTE const* const ilimit = args->ilimit; + + /* Copy the arguments to local registers. */ + ZSTD_memcpy(&bits, &args->bits, sizeof(bits)); + ZSTD_memcpy((void*)(&ip), &args->ip, sizeof(ip)); + ZSTD_memcpy(&op, &args->op, sizeof(op)); + + oend[0] = op[1]; + oend[1] = op[2]; + oend[2] = op[3]; + oend[3] = args->oend; + + assert(MEM_isLittleEndian()); + assert(!MEM_32bits()); + + for (;;) { + BYTE* olimit; + int stream; + int symbol; + + /* Assert loop preconditions */ +#ifndef NDEBUG + for (stream = 0; stream < 4; ++stream) { + assert(op[stream] <= oend[stream]); + assert(ip[stream] >= ilimit); + } +#endif + /* Compute olimit */ + { + /* Each loop does 5 table lookups for each of the 4 streams. + * Each table lookup consumes up to 11 bits of input, and produces + * up to 2 bytes of output. + */ + /* We can consume up to 7 bytes of input per iteration per stream. + * We also know that each input pointer is >= ip[0]. So we can run + * iters loops before running out of input. + */ + size_t iters = (size_t)(ip[0] - ilimit) / 7; + /* Each iteration can produce up to 10 bytes of output per stream. + * Each output stream my advance at different rates. So take the + * minimum number of safe iterations among all the output streams. 
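 *
 * (Editorial summary of the bound being computed here:)
 *     iiters = (ip[0] - ilimit) / 7;      5 lookups x 11 bits = 55 bits  -> at most 7 bytes in per stream
 *     oiters = (oend[s] - op[s]) / 10;    5 lookups x  2 bytes = 10 bytes -> at most 10 bytes out per stream
 *     iters  = MIN(iiters, minimum over all streams of oiters);
 * olimit = op[3] + iters * 5 then bounds the do/while loop below, since stream 3 emits
 * at least 5 symbols per iteration.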
+ */ + for (stream = 0; stream < 4; ++stream) { + size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10; + iters = MIN(iters, oiters); + } + + /* Each iteration produces at least 5 output symbols. So until + * op[3] crosses olimit, we know we haven't executed iters + * iterations yet. This saves us maintaining an iters counter, + * at the expense of computing the remaining # of iterations + * more frequently. + */ + olimit = op[3] + (iters * 5); + + /* Exit the fast decoding loop if we are too close to the end. */ + if (op[3] + 10 > olimit) + break; + + /* Exit the decoding loop if any input pointer has crossed the + * previous one. This indicates corruption, and a precondition + * to our loop is that ip[i] >= ip[0]. + */ + for (stream = 1; stream < 4; ++stream) { + if (ip[stream] < ip[stream - 1]) + goto _out; + } + } + +#ifndef NDEBUG + for (stream = 1; stream < 4; ++stream) { + assert(ip[stream] >= ip[stream - 1]); + } +#endif + + do { + /* Do 5 table lookups for each of the first 3 streams */ + for (symbol = 0; symbol < 5; ++symbol) { + for (stream = 0; stream < 3; ++stream) { + int const index = (int)(bits[stream] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[stream], entry.sequence); + bits[stream] <<= (entry.nbBits); + op[stream] += (entry.length); + } + } + /* Do 1 table lookup from the final stream */ + { + int const index = (int)(bits[3] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[3], entry.sequence); + bits[3] <<= (entry.nbBits); + op[3] += (entry.length); + } + /* Do 4 table lookups from the final stream & reload bitstreams */ + for (stream = 0; stream < 4; ++stream) { + /* Do a table lookup from the final stream. + * This is interleaved with the reloading to reduce register + * pressure. This shouldn't be necessary, but compilers can + * struggle with codegen with high register pressure. + */ + { + int const index = (int)(bits[3] >> 53); + HUF_DEltX2 const entry = dtable[index]; + MEM_write16(op[3], entry.sequence); + bits[3] <<= (entry.nbBits); + op[3] += (entry.length); + } + /* Reload the bistreams. The final bitstream must be reloaded + * after the 5th symbol was decoded. + */ + { + int const ctz = ZSTD_countTrailingZeros64(bits[stream]); + int const nbBits = ctz & 7; + int const nbBytes = ctz >> 3; + ip[stream] -= nbBytes; + bits[stream] = MEM_read64(ip[stream]) | 1; + bits[stream] <<= nbBits; + } + } + } while (op[3] < olimit); + } -static HUF_ASM_X86_64_BMI2_ATTRS size_t -HUF_decompress4X2_usingDTable_internal_bmi2_asm( +_out: + + /* Save the final values of each of the state variables back to args. 
*/ + ZSTD_memcpy(&args->bits, &bits, sizeof(bits)); + ZSTD_memcpy((void*)(&args->ip), &ip, sizeof(ip)); + ZSTD_memcpy(&args->op, &op, sizeof(op)); +} + + +static HUF_FAST_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_fast( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) { + const HUF_DTable* DTable, + HUF_DecompressFastLoopFn loopFn) { void const* dt = DTable + 1; const BYTE* const iend = (const BYTE*)cSrc + 6; BYTE* const oend = (BYTE*)dst + dstSize; - HUF_DecompressAsmArgs args; + HUF_DecompressFastArgs args; { - size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); FORWARD_IF_ERROR(ret, "Failed to init asm args"); - if (ret != 0) - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (ret == 0) + return 0; } assert(args.ip[0] >= args.ilimit); - HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args); + loopFn(&args); /* note : op4 already verified within main loop */ assert(args.ip[0] >= iend); @@ -1430,91 +1653,72 @@ /* decoded size */ return dstSize; } -#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) + size_t cSrcSize, HUF_DTable const* DTable, int flags) { + HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default; + HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop; + #if DYNAMIC_BMI2 - if (bmi2) { + if (flags & HUF_flags_bmi2) { + fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2; # if ZSTD_ENABLE_ASM_X86_64_BMI2 - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -# else - return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } # endif + } else { + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } -#else - (void)bmi2; #endif #if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) - return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); -#else - return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); + if (!(flags & HUF_flags_disableAsm)) { + loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop; + } #endif + + if (!(flags & HUF_flags_disableFast)) { + size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn); + if (ret != 0) + return ret; + } + return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable); } HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -size_t HUF_decompress1X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -} - size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, - workSpace, wkspSize); + workSpace, 
wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); -} - - -size_t HUF_decompress4X2_usingDTable( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc dtd = HUF_getDTableDesc(DTable); - if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, flags); } -static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize, int bmi2) + void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, - workSpace, wkspSize); + workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) -{ - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); -} - - #endif /* HUF_FORCE_DECOMPRESS_X1 */ @@ -1522,44 +1726,6 @@ /* Universal decompression selectors */ /* ***********************************/ -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - -size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif -} - #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; @@ -1614,36 +1780,9 @@ #endif } - -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, - size_t dstSize, const void* cSrc, - size_t cSrcSize, void* workSpace, - size_t wkspSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize == 0) return ERROR(corruption_detected); - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#else - return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): - HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); -#endif - } -} - size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, - void* workSpace, size_t wkspSize) + void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1656,71 +1795,71 @@ (void)algoNb; assert(algoNb == 0); return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #else return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize): + cSrcSize, workSpace, wkspSize, flags): HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, - cSrcSize, workSpace, wkspSize); + cSrcSize, workSpace, wkspSize, flags); #endif } } -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } #ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize, flags); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags); } #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags) { DTableDesc const dtd = HUF_getDTableDesc(DTable); #if defined(HUF_FORCE_DECOMPRESS_X1) (void)dtd; assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)dtd; assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); + return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags); #endif } -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags) { /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1730,160 +1869,14 @@ #if defined(HUF_FORCE_DECOMPRESS_X1) (void)algoNb; assert(algoNb == 0); - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #elif defined(HUF_FORCE_DECOMPRESS_X2) (void)algoNb; assert(algoNb == 1); - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #else - return algoNb ? 
HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : - HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) : + HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags); #endif } } - -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX1_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX2_wksp(DTable, src, srcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); -} -#endif - -typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); - -size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) - static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; -#endif - - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return 
ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); -#else - return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); -#endif - } -} - -size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - /* validation checks */ - if (dstSize == 0) return ERROR(dstSize_tooSmall); - if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ - if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ - if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#else - return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : - HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; -#endif - } -} - -size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} - -size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize) -{ - U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, - workSpace, sizeof(workSpace)); -} -#endif diff --git a/sys/contrib/zstd/lib/decompress/huf_decompress_amd64.S b/sys/contrib/zstd/lib/decompress/huf_decompress_amd64.S --- a/sys/contrib/zstd/lib/decompress/huf_decompress_amd64.S +++ b/sys/contrib/zstd/lib/decompress/huf_decompress_amd64.S @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -30,14 +30,14 @@ * TODO: Support Windows calling convention. 
*/ -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) -ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) -.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop -.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_fast_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_fast_asm_loop) +.global HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global HUF_decompress4X2_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_fast_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_fast_asm_loop .text /* Sets up register mappings for clarity. @@ -95,8 +95,9 @@ /* Define both _HUF_* & HUF_* symbols because MacOS * C symbols are prefixed with '_' & Linux symbols aren't. */ -_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: +_HUF_decompress4X1_usingDTable_internal_fast_asm_loop: +HUF_decompress4X1_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. */ push %rax push %rbx @@ -350,8 +351,9 @@ pop %rax ret -_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: -HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: +_HUF_decompress4X2_usingDTable_internal_fast_asm_loop: +HUF_decompress4X2_usingDTable_internal_fast_asm_loop: + ZSTD_CET_ENDBRANCH /* Save all registers - even if they are callee saved for simplicity. 
*/ push %rax push %rbx @@ -427,41 +429,30 @@ /* r15 = (ip0 - ilimit) / 7 */ movq %rdx, %r15 - movabsq $-3689348814741910323, %rdx - movq 8(%rsp), %rax /* rax = oend0 */ - subq %op0, %rax /* rax = oend0 - op0 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + /* r15 = min(r15, min(oend0 - op0, oend1 - op1, oend2 - op2, oend3 - op3) / 10) */ + movq 8(%rsp), %rax /* rax = oend0 */ + subq %op0, %rax /* rax = oend0 - op0 */ + movq 16(%rsp), %rdx /* rdx = oend1 */ + subq %op1, %rdx /* rdx = oend1 - op1 */ - movabsq $-3689348814741910323, %rdx - movq 16(%rsp), %rax /* rax = oend1 */ - subq %op1, %rax /* rax = oend1 - op1 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - movabsq $-3689348814741910323, %rdx movq 24(%rsp), %rax /* rax = oend2 */ subq %op2, %rax /* rax = oend2 - op2 */ - mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ - /* r15 = min(%rdx, %r15) */ - cmpq %rdx, %r15 - cmova %rdx, %r15 + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ - movabsq $-3689348814741910323, %rdx movq 32(%rsp), %rax /* rax = oend3 */ subq %op3, %rax /* rax = oend3 - op3 */ + + cmpq %rax, %rdx + cmova %rax, %rdx /* rdx = min(%rdx, %rax) */ + + movabsq $-3689348814741910323, %rax mulq %rdx - shrq $3, %rdx /* rdx = rax / 10 */ + shrq $3, %rdx /* rdx = rdx / 10 */ /* r15 = min(%rdx, %r15) */ cmpq %rdx, %r15 diff --git a/sys/contrib/zstd/lib/decompress/zstd_ddict.h b/sys/contrib/zstd/lib/decompress/zstd_ddict.h --- a/sys/contrib/zstd/lib/decompress/zstd_ddict.h +++ b/sys/contrib/zstd/lib/decompress/zstd_ddict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/decompress/zstd_ddict.c b/sys/contrib/zstd/lib/decompress/zstd_ddict.c --- a/sys/contrib/zstd/lib/decompress/zstd_ddict.c +++ b/sys/contrib/zstd/lib/decompress/zstd_ddict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -14,12 +14,12 @@ /*-******************************************************* * Dependencies *********************************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "zstd_decompress_internal.h" #include "zstd_ddict.h" @@ -134,7 +134,7 @@ ZSTD_memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; - ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); @@ -240,5 +240,5 @@ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; - return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); + return ddict->dictID; } diff --git a/sys/contrib/zstd/lib/decompress/zstd_decompress.c b/sys/contrib/zstd/lib/decompress/zstd_decompress.c --- a/sys/contrib/zstd/lib/decompress/zstd_decompress.c +++ b/sys/contrib/zstd/lib/decompress/zstd_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -55,17 +55,18 @@ /*-******************************************************* * Dependencies *********************************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/xxhash.h" /* XXH64_reset, XXH64_update, XXH64_digest, XXH64 */ #include "../common/zstd_internal.h" /* blockProperties_t */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "../legacy/zstd_legacy.h" @@ -78,11 +79,11 @@ *************************************/ #define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 -#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. - * Currently, that means a 0.75 load factor. - * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded - * the load factor of the ddict hash set. - */ +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. + * Currently, that means a 0.75 load factor. + * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded + * the load factor of the ddict hash set. 
+ */ #define DDICT_HASHSET_TABLE_BASE_SIZE 64 #define DDICT_HASHSET_RESIZE_FACTOR 2 @@ -243,6 +244,7 @@ dctx->outBufferMode = ZSTD_bm_buffered; dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; + dctx->disableHufAsm = 0; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) @@ -438,16 +440,40 @@ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ +** or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); - ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */ - if (srcSize < minInputSize) return minInputSize; - RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter"); + DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize); + + if (srcSize > 0) { + /* note : technically could be considered an assert(), since it's an invalid entry */ + RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0"); + } + if (srcSize < minInputSize) { + if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) { + /* when receiving less than @minInputSize bytes, + * control these bytes at least correspond to a supported magic number + * in order to error out early if they don't. + **/ + size_t const toCopy = MIN(4, srcSize); + unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER); + assert(src != NULL); + ZSTD_memcpy(hbuf, src, toCopy); + if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) { + /* not a zstd frame : let's check if it's a skippable frame */ + MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START); + ZSTD_memcpy(hbuf, src, toCopy); + if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) { + RETURN_ERROR(prefix_unknown, + "first bytes don't correspond to any supported magic number"); + } } } + return minInputSize; + } + ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */ if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { @@ -563,49 +589,52 @@ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, frameParameter_unsupported, ""); - { - size_t const skippableSize = skippableHeaderSize + sizeU32; + { size_t const skippableSize = skippableHeaderSize + sizeU32; RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); return skippableSize; } } /*! ZSTD_readSkippableFrame() : - * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * Retrieves content of a skippable frame, and writes it to dst buffer. * * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. 
This can be NULL if the caller is not interested * in the magicVariant. * - * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame. * * @return : number of bytes written or a ZSTD error. */ -ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, - const void* src, size_t srcSize) +size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, + unsigned* magicVariant, /* optional, can be NULL */ + const void* src, size_t srcSize) { - U32 const magicNumber = MEM_readLE32(src); - size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); - size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; - - /* check input validity */ - RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); - RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); - RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); - /* deliver payload */ - if (skippableContentSize > 0 && dst != NULL) - ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); - if (magicVariant != NULL) - *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; - return skippableContentSize; + { U32 const magicNumber = MEM_readLE32(src); + size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); + size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; + + /* check input validity */ + RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); + RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + + /* deliver payload */ + if (skippableContentSize > 0 && dst != NULL) + ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); + if (magicVariant != NULL) + *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; + return skippableContentSize; + } } /** ZSTD_findDecompressedSize() : - * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames - * @return : decompressed size of the frames contained */ + * note: compatible with legacy mode + * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; @@ -615,9 +644,7 @@ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); - if (ZSTD_isError(skippableSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR; assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; @@ -625,17 +652,17 @@ continue; } - { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize); + if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs; - /* check for overflow */ - if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; - totalDstSize += ret; + if (totalDstSize + fcs < totalDstSize) + return 
ZSTD_CONTENTSIZE_ERROR; /* check for overflow */ + totalDstSize += fcs; } + /* skip to next frame */ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); - if (ZSTD_isError(frameSrcSize)) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR; + assert(frameSrcSize <= srcSize); src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; @@ -757,10 +784,11 @@ ip += 4; } + frameSizeInfo.nbBlocks = nbBlocks; frameSizeInfo.compressedSize = (size_t)(ip - ipstart); frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? zfh.frameContentSize - : nbBlocks * zfh.blockSizeMax; + : (unsigned long long)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } @@ -800,6 +828,48 @@ return bound; } +size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) +{ + size_t margin = 0; + unsigned maxBlockSize = 0; + + /* Iterate over each frame */ + while (srcSize > 0) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; + ZSTD_frameHeader zfh; + + FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) + return ERROR(corruption_detected); + + if (zfh.frameType == ZSTD_frame) { + /* Add the frame header to our margin */ + margin += zfh.headerSize; + /* Add the checksum to our margin */ + margin += zfh.checksumFlag ? 4 : 0; + /* Add 3 bytes per block */ + margin += 3 * frameSizeInfo.nbBlocks; + + /* Compute the max block size */ + maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); + } else { + assert(zfh.frameType == ZSTD_skippableFrame); + /* Add the entire skippable frame size to our margin. */ + margin += compressedSize; + } + + assert(srcSize >= compressedSize); + src = (const BYTE*)src + compressedSize; + srcSize -= compressedSize; + } + + /* Add the max block size back to the margin. */ + margin += maxBlockSize; + + return margin; +} /*-************************************************************* * Frame decoding @@ -825,7 +895,7 @@ if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } - ZSTD_memcpy(dst, src, srcSize); + ZSTD_memmove(dst, src, srcSize); return srcSize; } @@ -903,6 +973,7 @@ /* Loop on each block */ while (1) { + BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); @@ -912,16 +983,34 @@ remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + if (ip >= op && ip < oBlockEnd) { + /* We are decompressing in-place. Limit the output pointer so that we + * don't overwrite the block that we are currently reading. This will + * fail decompression if the input & output pointers aren't spaced + * far enough apart. + * + * This is important to set, even when the pointers are far enough + * apart, because ZSTD_decompressBlock_internal() can decide to store + * literals in the output buffer, after the block it is decompressing. + * Since we don't want anything to overwrite our input, we have to tell + * ZSTD_decompressBlock_internal to never write past ip. + * + * See ZSTD_allocateLiteralsBuffer() for reference. 
+ */ + oBlockEnd = op + (ip - op); + } + switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming); break; case bt_raw : + /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : - decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: @@ -956,6 +1045,7 @@ } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ + DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %zi, consuming %zi bytes of input", op-ostart, ip - (const BYTE*)*srcPtr); *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); @@ -1002,17 +1092,18 @@ } #endif - { U32 const magicNumber = MEM_readLE32(src); - DEBUGLOG(4, "reading magic number %08X (expecting %08X)", - (unsigned)magicNumber, ZSTD_MAGICNUMBER); + if (srcSize >= 4) { + U32 const magicNumber = MEM_readLE32(src); + DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + /* skippable frame detected : skip it */ size_t const skippableSize = readSkippableFrameSize(src, srcSize); - FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed"); + FORWARD_IF_ERROR(skippableSize, "invalid skippable frame"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; - continue; + continue; /* check next frame */ } } if (ddict) { @@ -1108,8 +1199,8 @@ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /** - * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed, - * we allow taking a partial block as the input. Currently only raw uncompressed blocks can + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we + * allow taking a partial block as the input. Currently only raw uncompressed blocks can * be streamed. 
* * For blocks that can be streamed, this allows us to reduce the latency until we produce @@ -1309,7 +1400,7 @@ default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } @@ -1350,11 +1441,11 @@ /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (size_t)(dictEnd - dictPtr), - workspace, workspaceSize); + workspace, workspaceSize, /* flags */ 0); #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; @@ -1453,7 +1544,7 @@ dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; @@ -1515,7 +1606,7 @@ * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. - * Needed dictionary is a hidden information. + * Needed dictionary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. @@ -1524,7 +1615,7 @@ * ZSTD_getFrameHeader(), which will provide a more precise error code. 
*/ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; @@ -1631,7 +1722,9 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); - return ZSTD_initDStream_usingDDict(zds, NULL); + FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); + FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); + return ZSTD_startingInputLength(zds->format); } /* ZSTD_initDStream_usingDDict() : @@ -1639,6 +1732,7 @@ * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { + DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); @@ -1649,6 +1743,7 @@ * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { + DEBUGLOG(4, "ZSTD_resetDStream"); FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } @@ -1720,6 +1815,11 @@ bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; + case ZSTD_d_disableHuffmanAssembly: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + default:; } bounds.error = ERROR(parameter_unsupported); @@ -1760,6 +1860,9 @@ case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; + case ZSTD_d_disableHuffmanAssembly: + *value = (int)dctx->disableHufAsm; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1793,6 +1896,10 @@ } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; + case ZSTD_d_disableHuffmanAssembly: + CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); + dctx->disableHufAsm = value != 0; + return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); @@ -1980,7 +2087,6 @@ if (zds->refMultipleDDicts && zds->ddictSet) { ZSTD_DCtx_selectFrameDDict(zds); } - DEBUGLOG(5, "header size : %u", (U32)hSize); if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); @@ -2012,6 +2118,11 @@ zds->lhSize += remainingInput; } input->pos = input->size; + /* check first few bytes */ + FORWARD_IF_ERROR( + ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format), + "First few bytes detected incorrect" ); + /* return hint input size */ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); @@ -2029,8 +2140,9 @@ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + assert(istart != NULL); ip = istart + cSize; - op += decompressedSize; + op = op ? 
op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; @@ -2114,6 +2226,7 @@ } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); + assert(ip != NULL); ip += neededInSize; /* Function modifies the stage so we must break */ break; @@ -2128,7 +2241,7 @@ int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we shouldn't be decompressing a block that we can stream. */ - assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { @@ -2137,8 +2250,11 @@ "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } - ip += loadedSize; - zds->inPos += loadedSize; + if (loadedSize != 0) { + /* ip may be NULL */ + ip += loadedSize; + zds->inPos += loadedSize; + } if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ @@ -2148,14 +2264,17 @@ break; } case zdss_flush: - { size_t const toFlushSize = zds->outEnd - zds->outStart; + { + size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); - op += flushedSize; + + op = op ? op + flushedSize : op; + zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) - && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); @@ -2169,7 +2288,7 @@ default: assert(0); /* impossible */ - RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } /* result */ @@ -2182,8 +2301,8 @@ if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { - RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); - RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); + RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); + RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); assert(0); } } else { @@ -2220,11 +2339,17 @@ void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { - ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; - ZSTD_inBuffer input = { src, srcSize, *srcPos }; - /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ - size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); - *dstPos = output.pos; - *srcPos = input.pos; - return cErr; + ZSTD_outBuffer output; + ZSTD_inBuffer input; + output.dst = dst; + output.size = dstCapacity; + output.pos = *dstPos; + input.src = src; + input.size = srcSize; + input.pos = *srcPos; + { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; + } } diff --git 
a/sys/contrib/zstd/lib/decompress/zstd_decompress_block.h b/sys/contrib/zstd/lib/decompress/zstd_decompress_block.h --- a/sys/contrib/zstd/lib/decompress/zstd_decompress_block.h +++ b/sys/contrib/zstd/lib/decompress/zstd_decompress_block.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -64,5 +64,10 @@ unsigned tableLog, void* wksp, size_t wkspSize, int bmi2); +/* Internal definition of ZSTD_decompressBlock() to avoid deprecation warnings. */ +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + #endif /* ZSTD_DEC_BLOCK_H */ diff --git a/sys/contrib/zstd/lib/decompress/zstd_decompress_block.c b/sys/contrib/zstd/lib/decompress/zstd_decompress_block.c --- a/sys/contrib/zstd/lib/decompress/zstd_decompress_block.c +++ b/sys/contrib/zstd/lib/decompress/zstd_decompress_block.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -20,12 +20,12 @@ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" -#define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/zstd_internal.h" #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" +#include "../common/bits.h" /* ZSTD_highbit32 */ /*_******************************************************* * Macros @@ -89,7 +89,7 @@ dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; } else { - /* initially this will be stored entirely in dst during huffman decoding, it will partially shifted to litExtraBuffer after */ + /* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; } @@ -134,13 +134,16 @@ ZSTD_FALLTHROUGH; case set_compressed: - RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); + RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3"); { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); + int const flags = 0 + | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) + | (dctx->disableHufAsm ? 
HUF_flags_disableAsm : 0); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -165,6 +168,10 @@ } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + if (!singleStream) + RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, + "Not enough literals (%zu) for the 4-streams mode (min %u)", + litSize, MIN_LITERALS_FOR_4_STREAMS); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); @@ -176,13 +183,14 @@ if (litEncType==set_repeat) { if (singleStream) { - hufSuccess = HUF_decompress1X_usingDTable_bmi2( + hufSuccess = HUF_decompress1X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } else { - hufSuccess = HUF_decompress4X_usingDTable_bmi2( + assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); + hufSuccess = HUF_decompress4X_usingDTable( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); + dctx->HUFptr, flags); } } else { if (singleStream) { @@ -190,18 +198,18 @@ hufSuccess = HUF_decompress1X_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace)); + sizeof(dctx->workspace), flags); #else - hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( + hufSuccess = HUF_decompress1X1_DCtx_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); #endif } else { - hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( + hufSuccess = HUF_decompress4X_hufOnly_wksp( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); + sizeof(dctx->workspace), flags); } } if (dctx->litBufferLocation == ZSTD_split) @@ -237,6 +245,7 @@ break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3"); litSize = MEM_readLE24(istart) >> 4; break; } @@ -279,12 +288,13 @@ break; case 1: lhSize = 2; + RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3"); litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; + RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4"); litSize = MEM_readLE24(istart) >> 4; - RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); @@ -506,14 +516,15 @@ for (i = 8; i < n; i += 8) { MEM_write64(spread + pos + i, sv); } - pos += n; + assert(n>=0); + pos += (size_t)n; } } /* Now we spread those positions across the table. - * The benefit of doing it in two stages is that we avoid the the + * The benefit of doing it in two stages is that we avoid the * variable size inner loop, which caused lots of branch misses. * Now we can run through all the positions without any branch misses. - * We unroll the loop twice, since that is what emperically worked best. 
+ * We unroll the loop twice, since that is what empirically worked best. */ { size_t position = 0; @@ -540,7 +551,7 @@ for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */ } } assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ } @@ -551,7 +562,7 @@ for (u=0; ustateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); + ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); +#else const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; +#endif seq.matchLength = mlDInfo->baseValue; seq.litLength = llDInfo->baseValue; { U32 const ofBase = ofDInfo->baseValue; @@ -1186,28 +1220,31 @@ U32 const llnbBits = llDInfo->nbBits; U32 const mlnbBits = mlDInfo->nbBits; U32 const ofnbBits = ofDInfo->nbBits; + + assert(llBits <= MaxLLBits); + assert(mlBits <= MaxMLBits); + assert(ofBits <= MaxOff); /* * As gcc has better branch and block analyzers, sometimes it is only - * valuable to mark likelyness for clang, it gives around 3-4% of + * valuable to mark likeliness for clang, it gives around 3-4% of * performance. */ /* sequence */ { size_t offset; - #if defined(__clang__) - if (LIKELY(ofBits > 1)) { - #else if (ofBits > 1) { - #endif ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); + ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + /* Always read extra bits, this keeps the logic simple, + * avoids branches, and avoids accidentally reading 0 bits. + */ + U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + offset += BIT_readBitsFast(&seqState->DStream, extraBits); } else { offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); @@ -1232,11 +1269,7 @@ seq.offset = offset; } - #if defined(__clang__) - if (UNLIKELY(mlBits > 0)) - #else if (mlBits > 0) - #endif seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) @@ -1246,11 +1279,7 @@ /* Ensure there are enough bits to read the rest of data in 64-bit mode. 
*/ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - #if defined(__clang__) - if (UNLIKELY(llBits > 0)) - #else if (llBits > 0) - #endif seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); if (MEM_32bits()) @@ -1552,7 +1581,7 @@ const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_body"); + DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq); (void)frame; /* Regen sequences */ @@ -1945,34 +1974,79 @@ #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ +/** + * @returns The total size of the history referenceable by zstd, including + * both the prefix and the extDict. At @p op any offset larger than this + * is invalid. + */ +static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart) +{ + return (size_t)(op - virtualStart); +} -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) -/* ZSTD_getLongOffsetsShare() : +typedef struct { + unsigned longOffsetShare; + unsigned maxNbAdditionalBits; +} ZSTD_OffsetInfo; + +/* ZSTD_getOffsetInfo() : * condition : offTable must be valid * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) - * compared to maximum possible of (1< 22) total += 1; + ZSTD_OffsetInfo info = {0, 0}; + /* If nbSeq == 0, then the offTable is uninitialized, but we have + * no sequences, so both values should be 0. + */ + if (nbSeq != 0) { + const void* ptr = offTable; + U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; + const ZSTD_seqSymbol* table = offTable + 1; + U32 const max = 1 << tableLog; + U32 u; + DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); + + assert(max <= (1 << OffFSELog)); /* max not too large */ + for (u=0; u 22) info.longOffsetShare += 1; + } + + assert(tableLog <= OffFSELog); + info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */ } - assert(tableLog <= OffFSELog); - total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + return info; +} - return total; +/** + * @returns The maximum offset we can decode in one read of our bitstream, without + * reloading more bits in the middle of the offset bits read. Any offsets larger + * than this must use the long offset decoder. + */ +static size_t ZSTD_maxShortOffset(void) +{ + if (MEM_64bits()) { + /* We can decode any offset without reloading bits. + * This might change if the max window size grows. + */ + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + return (size_t)-1; + } else { + /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. + * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. + * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. + */ + size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; + size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; + assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); + return maxOffset; + } } -#endif size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, @@ -1980,20 +2054,21 @@ const void* src, size_t srcSize, const int frame, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; - /* isLongOffset must be true if there are long offsets. - * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. - * We don't expect that to be the case in 64-bit mode. 
- * In block mode, window size is not known, so we have to be conservative. - * (note: but it could be evaluated from current-lowLimit) - */ - ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); - RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); + /* Note : the wording of the specification + * allows compressed block to be sized exactly ZSTD_BLOCKSIZE_MAX. + * This generally does not happen, as it makes little sense, + * since an uncompressed block would feature same size and have no decompression cost. + * Also, note that decoder from reference libzstd before < v1.5.4 + * would consider this edge case as an error. + * As a consequence, avoid generating compressed blocks of size ZSTD_BLOCKSIZE_MAX + * for broader compatibility with the deployed ecosystem of zstd decoders */ + RETURN_ERROR_IF(srcSize > ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); - DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; @@ -2001,6 +2076,23 @@ /* Build Decoding Tables */ { + /* Compute the maximum block size, which must also work when !frame and fParams are unset. + * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. + */ + size_t const blockSizeMax = MIN(dstCapacity, (frame ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX)); + size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax, (BYTE const*)dctx->virtualStart); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than ZSTD_maxShortOffset(). + * We don't expect that to be the case in 64-bit mode. + * + * We check here to see if our history is large enough to allow long offsets. + * If it isn't, then we can't possible have (valid) long offsets. If the offset + * is invalid, then it is okay to read it incorrectly. + * + * If isLongOffsets is true, then we will later check our decoding table to see + * if it is even possible to generate long offsets. + */ + ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. @@ -2008,6 +2100,11 @@ #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; +#else + /* Set to 1 to avoid computing offset info if we don't need to. + * Otherwise this value is ignored. 
+ */ + int usePrefetchDecoder = 1; #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); @@ -2015,28 +2112,42 @@ ip += seqHSize; srcSize -= seqHSize; - RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall, + "invalid dst"); -#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ - !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if ( !usePrefetchDecoder - && (!frame || (dctx->fParams.windowSize > (1<<24))) - && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ - U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); - U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ - usePrefetchDecoder = (shareLongOffsets >= minShare); + /* If we could potentially have long offsets, or we might want to use the prefetch decoder, + * compute information about the share of long offsets, and the maximum nbAdditionalBits. + * NOTE: could probably use a larger nbSeq limit + */ + if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { + ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); + if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { + /* If isLongOffset, but the maximum number of additional bits that we see in our table is small + * enough, then we know it is impossible to have too long an offset in this block, so we can + * use the regular offset decoder. + */ + isLongOffset = ZSTD_lo_isRegularOffset; + } + if (!usePrefetchDecoder) { + U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + usePrefetchDecoder = (info.longOffsetShare >= minShare); + } } -#endif dctx->ddictIsCold = 0; #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) - if (usePrefetchDecoder) + if (usePrefetchDecoder) { +#else + (void)usePrefetchDecoder; + { #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif + } #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ @@ -2060,9 +2171,9 @@ } -size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize) +size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) { size_t dSize; ZSTD_checkContinuity(dctx, dst, dstCapacity); @@ -2070,3 +2181,12 @@ dctx->previousDstEnd = (char*)dst + dSize; return dSize; } + + +/* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); +} diff --git a/sys/contrib/zstd/lib/decompress/zstd_decompress_internal.h b/sys/contrib/zstd/lib/decompress/zstd_decompress_internal.h --- a/sys/contrib/zstd/lib/decompress/zstd_decompress_internal.h +++ b/sys/contrib/zstd/lib/decompress/zstd_decompress_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -75,12 +75,13 @@ #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64)) #define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32)) +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 typedef struct { ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ - HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + HUF_DTable hufTable[HUF_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUF_decompress4X */ U32 rep[ZSTD_REP_NUM]; U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; } ZSTD_entropyDTables_t; @@ -164,6 +165,7 @@ ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */ + int disableHufAsm; /* streaming */ ZSTD_dStreamStage streamStage; diff --git a/sys/contrib/zstd/lib/deprecated/zbuff.h b/sys/contrib/zstd/lib/deprecated/zbuff.h --- a/sys/contrib/zstd/lib/deprecated/zbuff.h +++ b/sys/contrib/zstd/lib/deprecated/zbuff.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/deprecated/zbuff_common.c b/sys/contrib/zstd/lib/deprecated/zbuff_common.c --- a/sys/contrib/zstd/lib/deprecated/zbuff_common.c +++ b/sys/contrib/zstd/lib/deprecated/zbuff_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/deprecated/zbuff_compress.c b/sys/contrib/zstd/lib/deprecated/zbuff_compress.c --- a/sys/contrib/zstd/lib/deprecated/zbuff_compress.c +++ b/sys/contrib/zstd/lib/deprecated/zbuff_compress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/deprecated/zbuff_decompress.c b/sys/contrib/zstd/lib/deprecated/zbuff_decompress.c --- a/sys/contrib/zstd/lib/deprecated/zbuff_decompress.c +++ b/sys/contrib/zstd/lib/deprecated/zbuff_decompress.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
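The new long-offset detection in the zstd_decompress_block.c hunks above only matters on 32-bit targets. A minimal standalone sketch of the arithmetic, assuming STREAM_ACCUMULATOR_MIN_32 == 25, ZSTD_REP_NUM == 3 and OffFSELog == 8 as in the upstream headers; everything else here is illustrative, not the library's code:

#include <stdio.h>
#include <stddef.h>

#define STREAM_ACCUMULATOR_MIN_32 25   /* assumed from upstream bitstream.h */
#define ZSTD_REP_NUM 3                 /* assumed from upstream zstd_internal.h */

int main(void)
{
    /* Largest offBase decodable in one read of a 32-bit bitstream accumulator. */
    size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN_32 + 1)) - 1;  /* 2^26 - 1 */
    /* offBase carries offset + ZSTD_REP_NUM, so the largest "short" offset is: */
    size_t const maxShortOffset = maxOffbase - ZSTD_REP_NUM;

    /* The long-offset path is only needed when the referenceable history is big
     * enough that a valid offset could exceed maxShortOffset. */
    size_t const totalHistorySize = (size_t)1 << 27;   /* example: 128 MiB of history */
    int const isLongOffset = totalHistorySize > maxShortOffset;

    printf("maxOffbase=%zu maxShortOffset=%zu isLongOffset=%d\n",
           maxOffbase, maxShortOffset, isLongOffset);

    /* Prefetch heuristic from the same hunk: the long-offset count is scaled to
     * 1<<OffFSELog == 256, so minShare 7 and 20 correspond to 7/256 = 2.73%
     * and 20/256 = 7.81%, the percentages quoted in the diff comment. */
    return 0;
}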
* * This source code is licensed under both the BSD-style license (found in the @@ -13,6 +13,8 @@ /* ************************************* * Dependencies ***************************************/ +#define ZSTD_DISABLE_DEPRECATE_WARNINGS /* suppress warning on ZSTD_initDStream_usingDict */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" diff --git a/sys/contrib/zstd/lib/dictBuilder/cover.h b/sys/contrib/zstd/lib/dictBuilder/cover.h --- a/sys/contrib/zstd/lib/dictBuilder/cover.h +++ b/sys/contrib/zstd/lib/dictBuilder/cover.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/dictBuilder/cover.c b/sys/contrib/zstd/lib/dictBuilder/cover.c --- a/sys/contrib/zstd/lib/dictBuilder/cover.c +++ b/sys/contrib/zstd/lib/dictBuilder/cover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -34,6 +34,7 @@ #include "../common/pool.h" #include "../common/threading.h" #include "../common/zstd_internal.h" /* includes zstd.h */ +#include "../common/bits.h" /* ZSTD_highbit32 */ #include "../zdict.h" #include "cover.h" @@ -541,7 +542,7 @@ /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `COVER_ctx_destroy()`. 
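In the zbuff_decompress.c hunk above, ZSTD_DISABLE_DEPRECATE_WARNINGS is defined before zstd.h is included so the file can keep calling a deprecated streaming helper without emitting deprecation warnings. A consumer-side sketch of the same pattern; it assumes a libzstd build that exposes the static-linking-only helpers:

/* Must come before zstd.h: silences the ZSTD_DEPRECATED attribute. */
#define ZSTD_DISABLE_DEPRECATE_WARNINGS
#define ZSTD_STATIC_LINKING_ONLY        /* the deprecated helper is gated behind this */
#include <zstd.h>
#include <stdio.h>

int main(void)
{
    ZSTD_DStream* const zds = ZSTD_createDStream();
    if (zds == NULL) return 1;
    /* Deprecated call referenced by the hunk above; no warning with the macro set. */
    {   size_t const r = ZSTD_initDStream_usingDict(zds, NULL, 0);
        if (ZSTD_isError(r)) { ZSTD_freeDStream(zds); return 1; }
    }
    printf("libzstd %s: DStream initialized\n", ZSTD_versionString());
    ZSTD_freeDStream(zds);
    return 0;
}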
@@ -646,7 +647,7 @@ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) { - const double ratio = (double)nbDmers / maxDictSize; + const double ratio = (double)nbDmers / (double)maxDictSize; if (ratio >= 10) { return; } @@ -950,9 +951,17 @@ } } +static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz) +{ + COVER_dictSelection_t ds; + ds.dictContent = buf; + ds.dictSize = s; + ds.totalCompressedSize = csz; + return ds; +} + COVER_dictSelection_t COVER_dictSelectionError(size_t error) { - COVER_dictSelection_t selection = { NULL, 0, error }; - return selection; + return setDictSelection(NULL, 0, error); } unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { @@ -1005,9 +1014,8 @@ } if (params.shrinkDict == 0) { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; free(candidateDictBuffer); - return selection; + return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize); } largestDict = dictContentSize; @@ -1039,20 +1047,16 @@ return COVER_dictSelectionError(totalCompressedSize); } - if (totalCompressedSize <= largestCompressed * regressionTolerance) { - COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; + if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) { free(largestDictbuffer); - return selection; + return setDictSelection( candidateDictBuffer, dictContentSize, totalCompressedSize ); } dictContentSize *= 2; } dictContentSize = largestDict; totalCompressedSize = largestCompressed; - { - COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; - free(candidateDictBuffer); - return selection; - } + free(candidateDictBuffer); + return setDictSelection( largestDictbuffer, dictContentSize, totalCompressedSize ); } /** diff --git a/sys/contrib/zstd/lib/dictBuilder/fastcover.c b/sys/contrib/zstd/lib/dictBuilder/fastcover.c --- a/sys/contrib/zstd/lib/dictBuilder/fastcover.c +++ b/sys/contrib/zstd/lib/dictBuilder/fastcover.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -304,7 +304,7 @@ /** * Prepare a context for dictionary building. - * The context is only dependent on the parameter `d` and can used multiple + * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 0 on success or error code on error. * The context must be destroyed with `FASTCOVER_ctx_destroy()`. diff --git a/sys/contrib/zstd/lib/dictBuilder/zdict.c b/sys/contrib/zstd/lib/dictBuilder/zdict.c --- a/sys/contrib/zstd/lib/dictBuilder/zdict.c +++ b/sys/contrib/zstd/lib/dictBuilder/zdict.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
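cover.c above replaces repeated aggregate initializers of COVER_dictSelection_t with the small setDictSelection() constructor; building the struct member-by-member in one helper keeps every return site identical and sidesteps compiler complaints about non-constant aggregate initializers on stricter toolchains. A generic sketch of the pattern with hypothetical names (result_t, make_result):

#include <stddef.h>
#include <stdio.h>

typedef struct {
    void*  buf;
    size_t size;
    size_t err;
} result_t;

/* One place that fills the struct, instead of an aggregate initializer
 * duplicated at every return site. */
static result_t make_result(void* buf, size_t size, size_t err)
{
    result_t r;
    r.buf = buf;
    r.size = size;
    r.err = err;
    return r;
}

static result_t fail(size_t err) { return make_result(NULL, 0, err); }

int main(void)
{
    result_t const r = fail(1);
    printf("err=%zu\n", r.err);
    return 0;
}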
* * This source code is licensed under both the BSD-style license (found in the @@ -44,7 +44,6 @@ #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif -#define HUF_STATIC_LINKING_ONLY #include "../common/mem.h" /* read */ #include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */ @@ -54,6 +53,7 @@ #include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */ #include "../zdict.h" #include "divsufsort.h" +#include "../common/bits.h" /* ZSTD_NbCommonBytes */ /*-************************************* @@ -130,85 +130,6 @@ /*-******************************************************** * Dictionary training functions **********************************************************/ -static unsigned ZDICT_NbCommonBytes (size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanForward64(&r, (U64)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanForward(&r, (U32)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - if (val != 0) { - unsigned long r; - _BitScanReverse64(&r, val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - if (val != 0) { - unsigned long r; - _BitScanReverse(&r, (unsigned long)val); - return (unsigned)(r >> 3); - } else { - /* Should not reach this code path */ - __assume(0); - } -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (unsigned)(__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - /*! ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band. 
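ZDICT_count() now relies on the shared ZSTD_NbCommonBytes() instead of the local ZDICT_NbCommonBytes() removed above. A self-contained sketch of the underlying trick for little-endian 64-bit targets: compare word-at-a-time, and on a mismatch the index of the first differing byte is the count of trailing zero bits of the XOR divided by 8 (GCC/Clang __builtin_ctzll assumed):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Number of equal leading bytes of two buffers, little-endian 64-bit variant. */
static size_t commonPrefix(const void* a, const void* b, size_t len)
{
    const unsigned char* pa = (const unsigned char*)a;
    const unsigned char* pb = (const unsigned char*)b;
    size_t n = 0;
    while (n + sizeof(uint64_t) <= len) {
        uint64_t wa, wb;
        memcpy(&wa, pa + n, sizeof(wa));
        memcpy(&wb, pb + n, sizeof(wb));
        if (wa != wb) {
            /* lowest-address differing byte == trailing zero bits of the XOR / 8 */
            return n + ((size_t)__builtin_ctzll(wa ^ wb) >> 3);
        }
        n += sizeof(uint64_t);
    }
    while (n < len && pa[n] == pb[n]) n++;   /* finish the tail byte by byte */
    return n;
}

int main(void)
{
    const char x[] = "dictionaries are useful";
    const char y[] = "dictionaries are useless";
    printf("%zu common bytes\n", commonPrefix(x, y, sizeof(x) - 1));
    return 0;
}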
@@ -223,7 +144,7 @@ pMatch = (const char*)pMatch+sizeof(size_t); continue; } - pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); + pIn = (const char*)pIn+ZSTD_NbCommonBytes(diff); return (size_t)((const char*)pIn - pStart); } } @@ -451,7 +372,7 @@ elt = table[u]; /* sort : improve rank */ while ((u>1) && (table[u-1].savings < elt.savings)) - table[u] = table[u-1], u--; + table[u] = table[u-1], u--; table[u] = elt; return u; } } @@ -602,7 +523,7 @@ if (solution.length==0) { cursor++; continue; } ZDICT_insertDictItem(dictList, dictListSize, solution, buffer); cursor += solution.length; - DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100); + DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0); } } _cleanup: @@ -645,11 +566,11 @@ size_t cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ - { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict); + { size_t const errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict); if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; } } - cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); + cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ @@ -754,6 +675,7 @@ size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; + U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32]; /* init */ DEBUGLOG(4, "ZDICT_analyzeEntropy"); @@ -794,7 +716,7 @@ } } /* analyze, build stats, starting with literals */ - { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + { size_t maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(maxNbBits)) { eSize = maxNbBits; DISPLAYLEVEL(1, " HUF_buildCTable error \n"); @@ -803,7 +725,7 @@ if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n"); ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */ - maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog); + maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp)); assert(maxNbBits==9); } huffLog = (U32)maxNbBits; @@ -844,7 +766,7 @@ llLog = (U32)errorCode; /* write result to buffer */ - { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog); + { size_t const hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(wksp)); if (HUF_isError(hhSize)) { eSize = hhSize; DISPLAYLEVEL(1, "HUF_writeCTable error \n"); @@ -1047,9 +969,6 @@ * Warning : `samplesBuffer` must be followed by noisy guard band !!! * @return : size of dictionary, or an error code which can be tested with ZDICT_isError() */ -/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). 
*/ -ZSTDLIB_API -/* End FreeBSD */ static size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, diff --git a/sys/contrib/zstd/lib/legacy/zstd_legacy.h b/sys/contrib/zstd/lib/legacy/zstd_legacy.h --- a/sys/contrib/zstd/lib/legacy/zstd_legacy.h +++ b/sys/contrib/zstd/lib/legacy/zstd_legacy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -242,6 +242,13 @@ frameSizeInfo.compressedSize = ERROR(srcSize_wrong); frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; } + /* In all cases, decompressedBound == nbBlocks * ZSTD_BLOCKSIZE_MAX. + * So we can compute nbBlocks without having to change every function. + */ + if (frameSizeInfo.decompressedBound != ZSTD_CONTENTSIZE_ERROR) { + assert((frameSizeInfo.decompressedBound & (ZSTD_BLOCKSIZE_MAX - 1)) == 0); + frameSizeInfo.nbBlocks = (size_t)(frameSizeInfo.decompressedBound / ZSTD_BLOCKSIZE_MAX); + } return frameSizeInfo; } diff --git a/sys/contrib/zstd/lib/legacy/zstd_v01.h b/sys/contrib/zstd/lib/legacy/zstd_v01.h --- a/sys/contrib/zstd/lib/legacy/zstd_v01.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v01.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v01.c b/sys/contrib/zstd/lib/legacy/zstd_v01.c --- a/sys/contrib/zstd/lib/legacy/zstd_v01.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v01.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -190,25 +190,6 @@ /**************************************************************** * Memory I/O *****************************************************************/ -/* FSE_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
- * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define FSE_FORCE_MEMORY_ACCESS 1 -# endif -#endif - static unsigned FSE_32bits(void) { @@ -221,24 +202,6 @@ return one.c[0]; } -#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) - -static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } -static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -254,8 +217,6 @@ U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } -#endif /* FSE_FORCE_MEMORY_ACCESS */ - static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) @@ -1190,7 +1151,7 @@ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1759,20 +1720,26 @@ static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; + BYTE* const oLitEnd = op + sequence.litLength; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; - /* check */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); - if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ - if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) - memmove(op, *litPtr, litLength); /* overwrite risk */ - else - ZSTD_wildcopy(op, *litPtr, litLength); + ZSTD_memmove(op, *litPtr, sequence.litLength); /* note : v0.1 seems to allow 
scenarios where output or input are close to end of buffer */ + op += litLength; *litPtr = litEnd; /* update for next sequence */ diff --git a/sys/contrib/zstd/lib/legacy/zstd_v02.h b/sys/contrib/zstd/lib/legacy/zstd_v02.h --- a/sys/contrib/zstd/lib/legacy/zstd_v02.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v02.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v02.c b/sys/contrib/zstd/lib/legacy/zstd_v02.c --- a/sys/contrib/zstd/lib/legacy/zstd_v02.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v02.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -28,7 +28,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -115,24 +115,6 @@ /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -143,33 +125,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -190,9 +145,6 @@ memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -269,7 +221,7 @@ header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -433,7 +385,7 @@ } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -453,7 +405,7 @@ } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -510,7 +462,7 @@ Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -609,7 +561,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -753,7 +705,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -822,7 +774,7 @@ Header File Copyright (C) 2014-2015, Yann Collet. 
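The v02 comment fix above ("unsafe version; only works if nbBits >= 1") documents a real precondition of the fast bit-peek path: in the usual mask-and-shift implementation, nbBits == 0 turns the final shift into a no-op and the call returns the whole accumulator instead of zero bits. A small illustrative sketch; peekBitsFast is a made-up name, not the library function:

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/* Same shape as the fast look path: start/length expressed via a register-size mask. */
static uint64_t peekBitsFast(uint64_t bitContainer, unsigned bitsConsumed, unsigned nbBits)
{
    unsigned const regMask = 63;         /* sizeof(container)*8 - 1 */
    assert(nbBits >= 1);                 /* the documented precondition */
    /* With nbBits == 0 the second shift would be (64 - 0) & 63 == 0, so the
     * function would return up to 64 junk bits instead of an empty value. */
    return (bitContainer << (bitsConsumed & regMask)) >> ((64 - nbBits) & regMask);
}

int main(void)
{
    uint64_t const container = 0xFACEFACEFACEFACEull;
    printf("%llu\n", (unsigned long long)peekBitsFast(container, 0, 4));  /* top 4 bits */
    return 0;
}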
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -882,7 +834,7 @@ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -946,7 +898,7 @@ FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1450,7 +1402,7 @@ Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2609,7 +2561,7 @@ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3114,12 +3066,19 @@ const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use the pointer check */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ diff --git a/sys/contrib/zstd/lib/legacy/zstd_v03.h b/sys/contrib/zstd/lib/legacy/zstd_v03.h --- a/sys/contrib/zstd/lib/legacy/zstd_v03.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v03.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. 
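The checks added to the legacy sequence-execution code above validate litLength and matchLength against the space actually remaining before any end pointers are formed, so arithmetic overflow cannot defeat the later pointer comparisons. A condensed sketch of that ordering with illustrative names:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the check order used in the patched legacy decoders:
 * pure length checks first, pointer-based checks second, copies last. */
static int execSequenceChecked(unsigned char* op, unsigned char* oend,
                               const unsigned char* base,           /* start of decoded history */
                               const unsigned char** litPtr, const unsigned char* litLimit,
                               size_t litLength, size_t matchLength, size_t offset)
{
    size_t const seqLength = litLength + matchLength;

    if (seqLength > (size_t)(oend - op)) return 1;            /* would overflow dst */
    if (litLength > (size_t)(litLimit - *litPtr)) return 2;   /* would over-read literals */

    {   unsigned char* const oLitEnd = op + litLength;        /* now safe to form */
        if (offset > (size_t)(oLitEnd - base)) return 3;      /* match starts before history */
    }

    memmove(op, *litPtr, litLength);                           /* copy literals */
    *litPtr += litLength;

    {   unsigned char* out = op + litLength;                   /* copy match, overlap-safe */
        const unsigned char* match = out - offset;
        size_t i;
        for (i = 0; i < matchLength; i++) out[i] = match[i];
    }
    return 0;
}

int main(void)
{
    unsigned char dst[32] = "abc";
    const unsigned char lits[] = "def";
    const unsigned char* litPtr = lits;
    int const err = execSequenceChecked(dst + 3, dst + sizeof(dst), dst,
                                        &litPtr, lits + 3, 3, 4, 2);
    printf("err=%d out=%.10s\n", err, (const char*)dst);
    return 0;
}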
* * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v03.c b/sys/contrib/zstd/lib/legacy/zstd_v03.c --- a/sys/contrib/zstd/lib/legacy/zstd_v03.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v03.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -29,7 +29,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -116,24 +116,6 @@ /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -144,33 +126,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -191,10 +146,6 @@ memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -271,7 +222,7 @@ header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -435,7 +386,7 @@ } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -455,7 +406,7 @@ } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -512,7 +463,7 @@ Error codes and messages Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -611,7 +562,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -755,7 +706,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -823,7 +774,7 @@ Header File Copyright (C) 2014-2015, Yann Collet. 
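With the MEM_FORCE_MEMORY_ACCESS variants deleted above, only the memcpy()-based readers remain; that is the portable default, and current compilers lower a fixed-size memcpy to a single load where the target allows unaligned access. A minimal sketch of the idiom:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Portable unaligned 32-bit read: the fixed-size memcpy is inlined by
 * GCC/Clang/MSVC and becomes a plain load where the ABI permits it. */
static uint32_t read32(const void* p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void)
{
    unsigned char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    /* offset 1 is deliberately misaligned; this stays well-defined. */
    printf("0x%08x\n", (unsigned)read32(buf + 1));
    return 0;
}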
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -883,7 +834,7 @@ Header File for static linking only Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -947,7 +898,7 @@ FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1451,7 +1402,7 @@ Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2248,7 +2199,7 @@ zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2755,18 +2706,24 @@ const BYTE* const litEnd = *litPtr + sequence.litLength; /* checks */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (sequence.offset > (U32)(oLitEnd - base)) return ERROR(corruption_detected); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ /* copy Match */ - { - const BYTE* match = op - sequence.offset; + { const BYTE* match = op - sequence.offset; /* check */ if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ diff --git a/sys/contrib/zstd/lib/legacy/zstd_v04.h b/sys/contrib/zstd/lib/legacy/zstd_v04.h --- a/sys/contrib/zstd/lib/legacy/zstd_v04.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v04.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. 
+ * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v04.c b/sys/contrib/zstd/lib/legacy/zstd_v04.c --- a/sys/contrib/zstd/lib/legacy/zstd_v04.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v04.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -87,24 +87,6 @@ /**************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets generating assembly depending on alignment. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -115,33 +97,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. 
- can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -162,9 +117,6 @@ memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -542,7 +494,7 @@ header file (to include) Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -700,7 +652,7 @@ } /*! BIT_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -720,7 +672,7 @@ } /*!BIT_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); @@ -781,7 +733,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -930,7 +882,7 @@ FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1436,7 +1388,7 @@ header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1514,7 +1466,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1601,7 +1553,7 @@ Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2401,7 +2353,7 @@ zstd - decompression module fo v0.4 legacy format Copyright (C) 2015-2016, Yann Collet. 
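MEM_readLE16() in the v04 code above keeps its shape: read natively when the host is little-endian, otherwise assemble the value byte by byte. A small endian-independent sketch of that pattern (helper names are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int isLittleEndian(void)
{
    const union { uint32_t u; uint8_t c[4]; } one = { 1 };
    return one.c[0];
}

static uint16_t readLE16(const void* p)
{
    if (isLittleEndian()) {
        uint16_t v;
        memcpy(&v, p, sizeof(v));               /* native order already matches */
        return v;
    } else {
        const uint8_t* b = (const uint8_t*)p;
        return (uint16_t)(b[0] + (b[1] << 8));  /* assemble explicitly */
    }
}

int main(void)
{
    const uint8_t buf[2] = { 0x34, 0x12 };
    printf("0x%04x\n", (unsigned)readLE16(buf));   /* 0x1234 on any endianness */
    return 0;
}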
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2876,13 +2828,19 @@ const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTD_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3283,7 +3241,7 @@ Buffered version of Zstd compression library Copyright (C) 2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/sys/contrib/zstd/lib/legacy/zstd_v05.h b/sys/contrib/zstd/lib/legacy/zstd_v05.h --- a/sys/contrib/zstd/lib/legacy/zstd_v05.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v05.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v05.c b/sys/contrib/zstd/lib/legacy/zstd_v05.c --- a/sys/contrib/zstd/lib/legacy/zstd_v05.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v05.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -19,7 +19,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -106,24 +106,6 @@ /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. 
- * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } @@ -134,37 +116,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. -Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -195,9 +146,6 @@ memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - - MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) @@ -262,7 +210,7 @@ Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -286,7 +234,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
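Several hunks above keep the invariant that ZSTD_wildcopy() may only run when there are at least 8 spare bytes past the copy (hence the oend_8 checks and the "oLitEnd <= oend-8" notes). A sketch of the idea, assuming the classic 8-byte-chunk wildcopy used by these legacy decoders; names and details are illustrative:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Copies in 8-byte chunks and may write (and read) up to 7 bytes past
 * dst+length / src+length, so both buffers need that much slack. */
static void wildcopy8(void* dst, const void* src, size_t length)
{
    unsigned char* d = (unsigned char*)dst;
    const unsigned char* s = (const unsigned char*)src;
    unsigned char* const e = d + length;
    do {
        memcpy(d, s, 8);
        d += 8; s += 8;
    } while (d < e);
}

int main(void)
{
    char src[32] = "0123456789abcdefghij";
    char dst[32] = {0};                 /* 20-byte payload + the required slack */
    wildcopy8(dst, src, 20);
    printf("%.20s\n", dst);
    return 0;
}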
You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTD_STATIC_H #define ZSTD_STATIC_H @@ -398,7 +346,7 @@ Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -485,7 +433,7 @@ #define FSEv05_ENCODING_DYNAMIC 3 -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ @@ -553,7 +501,7 @@ header file Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -672,7 +620,7 @@ header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -826,7 +774,7 @@ } /*! BITv05_lookBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -846,7 +794,7 @@ } /*!BITv05_readBitsFast : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits) { size_t value = BITv05_lookBitsFast(bitD, nbBits); @@ -901,7 +849,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1051,7 +999,7 @@ FSEv05 : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1537,7 +1485,7 @@ header file Copyright (C) 2013-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1610,7 +1558,7 @@ header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1702,7 +1650,7 @@ Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2547,7 +2495,7 @@ zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2645,7 +2593,7 @@ FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)]; FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)]; FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)]; - unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv05_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2673,7 +2621,7 @@ dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagStaticTables = 0; return 0; } @@ -3234,13 +3182,19 @@ const BYTE* const litEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv05_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = litEnd; /* update for next sequence */ @@ -3746,7 +3700,7 @@ Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/sys/contrib/zstd/lib/legacy/zstd_v06.h b/sys/contrib/zstd/lib/legacy/zstd_v06.h --- a/sys/contrib/zstd/lib/legacy/zstd_v06.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v06.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v06.c b/sys/contrib/zstd/lib/legacy/zstd_v06.c --- a/sys/contrib/zstd/lib/legacy/zstd_v06.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v06.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -23,7 +23,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -108,24 +108,6 @@ /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -136,33 +118,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -183,9 +138,6 @@ memcpy(memPtr, &value, sizeof(value)); } - -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -281,7 +233,7 @@ Header File for static linking only Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -305,7 +257,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ #ifndef ZSTDv06_STATIC_H #define ZSTDv06_STATIC_H @@ -412,7 +364,7 @@ Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -479,7 +431,7 @@ #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 #define IS_HUF 0 #define IS_PCH 1 @@ -619,7 +571,7 @@ Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -767,7 +719,7 @@ header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -928,7 +880,7 @@ } /*! 
BITv06_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -948,7 +900,7 @@ } /*! BITv06_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits) { size_t const value = BITv06_lookBitsFast(bitD, nbBits); @@ -1002,7 +954,7 @@ header file for static linking (only) Copyright (C) 2013-2015, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1210,7 +1162,7 @@ Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1355,7 +1307,7 @@ FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1679,7 +1631,7 @@ header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1749,7 +1701,7 @@ header file, for static linking only Copyright (C) 2013-2016, Yann Collet - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1931,7 +1883,7 @@ Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2676,7 +2628,7 @@ Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2700,7 +2652,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2730,7 +2682,7 @@ zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2754,7 +2706,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -2806,7 +2758,7 @@ FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)]; FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)]; FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)]; - unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)]; + unsigned hufTableX4[HUFv06_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; const void* previousDstEnd; const void* base; const void* vBase; @@ -2834,7 +2786,7 @@ dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTableX4[0] = HufLog; + dctx->hufTableX4[0] = ZSTD_HUFFDTABLE_CAPACITY_LOG; dctx->flagRepeatTable = 0; return 0; } @@ -3370,13 +3322,19 @@ const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; - /* check */ - if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ + /* checks */ + size_t const seqLength = sequence.litLength + sequence.matchLength; + + if (seqLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected); + /* Now we know there are no overflow in literal nor match lengths, can use pointer checks */ + if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ /* copy Literals */ - ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ + ZSTDv06_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3889,7 +3847,7 @@ Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3913,7 +3871,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4035,7 +3993,8 @@ size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (ZSTDv06_isError(hSize)) return hSize; if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/sys/contrib/zstd/lib/legacy/zstd_v07.h b/sys/contrib/zstd/lib/legacy/zstd_v07.h --- a/sys/contrib/zstd/lib/legacy/zstd_v07.h +++ b/sys/contrib/zstd/lib/legacy/zstd_v07.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/lib/legacy/zstd_v07.c b/sys/contrib/zstd/lib/legacy/zstd_v07.c --- a/sys/contrib/zstd/lib/legacy/zstd_v07.c +++ b/sys/contrib/zstd/lib/legacy/zstd_v07.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Yann Collet, Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -184,7 +184,7 @@ low-level memory access routines Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -268,24 +268,6 @@ /*-************************************************************** * Memory I/O *****************************************************************/ -/* MEM_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method is portable but violate C standard. - * It can generate buggy code on targets depending on alignment. - * In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) -# define MEM_FORCE_MEMORY_ACCESS 1 -# endif -#endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } @@ -296,33 +278,6 @@ return one.c[0]; } -#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) - -/* violates C standard, by lying on structure alignment. 
-Only use if no other choice to achieve best performance on target platform */ -MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } -MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } -MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } - -#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; - -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } - -#else - -/* default method, safe and standard. - can sometimes prove slower */ - MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; @@ -343,8 +298,6 @@ memcpy(memPtr, &value, sizeof(value)); } -#endif /* MEM_FORCE_MEMORY_ACCESS */ - MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ @@ -439,7 +392,7 @@ header file (to include) Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -596,7 +549,7 @@ } /*! BITv07_lookBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits) { U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; @@ -616,7 +569,7 @@ } /*! BITv07_readBitsFast() : -* unsafe version; only works only if nbBits >= 1 */ +* unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits) { size_t const value = BITv07_lookBitsFast(bitD, nbBits); @@ -670,7 +623,7 @@ Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -978,7 +931,7 @@ header file Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1151,7 +1104,7 @@ Common functions of New Generation Entropy library Copyright (C) 2016, Yann Collet. 
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1375,7 +1328,7 @@ FSE : Finite State Entropy decoder Copyright (C) 2013-2015, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1699,7 +1652,7 @@ Huffman decoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2577,7 +2530,7 @@ Common functions of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2601,7 +2554,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -2647,7 +2600,7 @@ Header File for include Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2717,7 +2670,7 @@ #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ -#define HufLog 12 +#define ZSTD_HUFFDTABLE_CAPACITY_LOG 12 typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t; #define LONGNBSEQ 0x7F00 @@ -2854,7 +2807,7 @@ zstd - standard compression library Copyright (C) 2014-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2878,7 +2831,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net + - zstd homepage : https://facebook.github.io/zstd */ /* *************************************************************** @@ -2931,7 +2884,7 @@ FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)]; FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)]; FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)]; - HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */ + HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(ZSTD_HUFFDTABLE_CAPACITY_LOG)]; /* can accommodate HUFv07_decompress4X */ const void* previousDstEnd; const void* base; const void* vBase; @@ -2967,7 +2920,7 @@ dctx->base = NULL; dctx->vBase = NULL; dctx->dictEnd = NULL; - dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001); + dctx->hufTable[0] = (HUFv07_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; } @@ -3599,11 +3552,14 @@ const BYTE* match = oLitEnd - sequence.offset; /* check */ - if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ - if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + assert(oend >= op); + if (sequence.litLength + WILDCOPY_OVERLENGTH > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + if (sequenceLength > (size_t)(oend - op)) return ERROR(dstSize_tooSmall); + assert(litLimit >= *litPtr); + if (sequence.litLength > (size_t)(litLimit - *litPtr)) return ERROR(corruption_detected);; /* copy Literals */ - ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + ZSTDv07_wildcopy(op, *litPtr, (ptrdiff_t)sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ @@ -3617,7 +3573,7 @@ return sequenceLength; } /* span extDict & currentPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = (size_t)(dictEnd - match); memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -4253,7 +4209,7 @@ Buffered version of Zstd compression library Copyright (C) 2015-2016, Yann Collet. - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4277,7 +4233,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - - zstd homepage : http://www.zstd.net/ + - zstd homepage : https://facebook.github.io/zstd/ */ @@ -4417,7 +4373,8 @@ if (hSize != 0) { size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ - memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); + if (ip != NULL) + memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip); zbd->lhSize += iend-ip; *dstCapacityPtr = 0; return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */ diff --git a/sys/contrib/zstd/lib/libzstd.mk b/sys/contrib/zstd/lib/libzstd.mk --- a/sys/contrib/zstd/lib/libzstd.mk +++ b/sys/contrib/zstd/lib/libzstd.mk @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the @@ -15,17 +15,34 @@ # Zstd lib directory LIBZSTD ?= ./ +# ZSTD_LIB_MINIFY is a helper variable that +# configures a bunch of other variables to space-optimized defaults. +ZSTD_LIB_MINIFY ?= 0 + # Legacy support -ZSTD_LEGACY_SUPPORT ?= 5 +ifneq ($(ZSTD_LIB_MINIFY), 0) + ZSTD_LEGACY_SUPPORT ?= 0 +else + ZSTD_LEGACY_SUPPORT ?= 5 +endif ZSTD_LEGACY_MULTITHREADED_API ?= 0 # Build size optimizations -HUF_FORCE_DECOMPRESS_X1 ?= 0 -HUF_FORCE_DECOMPRESS_X2 ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 -ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 -ZSTD_NO_INLINE ?= 0 -ZSTD_STRIP_ERROR_STRINGS ?= 0 +ifneq ($(ZSTD_LIB_MINIFY), 0) + HUF_FORCE_DECOMPRESS_X1 ?= 1 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 1 + ZSTD_STRIP_ERROR_STRINGS ?= 1 +else + HUF_FORCE_DECOMPRESS_X1 ?= 0 + HUF_FORCE_DECOMPRESS_X2 ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 0 + ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG ?= 0 + ZSTD_NO_INLINE ?= 0 + ZSTD_STRIP_ERROR_STRINGS ?= 0 +endif # Assembly support ZSTD_NO_ASM ?= 0 @@ -61,17 +78,8 @@ CCVER := $(shell $(CC) --version) ZSTD_VERSION?= $(LIBVER) -# ZSTD_LIB_MINIFY is a helper variable that -# configures a bunch of other variables to space-optimized defaults. -ZSTD_LIB_MINIFY ?= 0 ifneq ($(ZSTD_LIB_MINIFY), 0) HAVE_CC_OZ ?= $(shell echo "" | $(CC) -Oz -x c -c - -o /dev/null 2> /dev/null && echo 1 || echo 0) - ZSTD_LEGACY_SUPPORT ?= 0 - ZSTD_LIB_DEPRECATED ?= 0 - HUF_FORCE_DECOMPRESS_X1 ?= 1 - ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT ?= 1 - ZSTD_NO_INLINE ?= 1 - ZSTD_STRIP_ERROR_STRINGS ?= 1 ifneq ($(HAVE_CC_OZ), 0) # Some compilers (clang) support an even more space-optimized setting. 
CFLAGS += -Oz @@ -114,13 +122,16 @@ endif endif +ifeq ($(shell echo "int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }" | $(CC) $(FLAGS) -z cet-report=error -x c -Werror - -o $(VOID) 2>$(VOID) && echo 1 || echo 0),1) +LDFLAGS += -z cet-report=error +endif + HAVE_COLORNEVER = $(shell echo a | grep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) GREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) GREP_OPTIONS += --color=never endif GREP = grep $(GREP_OPTIONS) -SED_ERE_OPT ?= -E ZSTD_COMMON_FILES := $(sort $(wildcard $(LIBZSTD)/common/*.c)) ZSTD_COMPRESS_FILES := $(sort $(wildcard $(LIBZSTD)/compress/*.c)) diff --git a/sys/contrib/zstd/lib/libzstd.pc.in b/sys/contrib/zstd/lib/libzstd.pc.in --- a/sys/contrib/zstd/lib/libzstd.pc.in +++ b/sys/contrib/zstd/lib/libzstd.pc.in @@ -1,6 +1,6 @@ # ZSTD - standard compression algorithm -# Copyright (C) 2014-2016, Yann Collet, Facebook -# BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +# Copyright (c) Meta Platforms, Inc. and affiliates. +# BSD 2-Clause License (https://opensource.org/licenses/bsd-license.php) prefix=@PREFIX@ exec_prefix=@EXEC_PREFIX@ @@ -9,7 +9,7 @@ Name: zstd Description: fast lossless compression algorithm library -URL: http://www.zstd.net/ +URL: https://facebook.github.io/zstd/ Version: @VERSION@ Libs: -L${libdir} -lzstd Libs.private: @LIBS_PRIVATE@ diff --git a/sys/contrib/zstd/lib/module.modulemap b/sys/contrib/zstd/lib/module.modulemap --- a/sys/contrib/zstd/lib/module.modulemap +++ b/sys/contrib/zstd/lib/module.modulemap @@ -1,17 +1,27 @@ module libzstd [extern_c] { header "zstd.h" export * - config_macros [exhaustive] /* zstd.h */ \ + config_macros [exhaustive] \ + /* zstd.h */ \ ZSTD_STATIC_LINKING_ONLY, \ + ZSTDLIB_VISIBILITY, \ ZSTDLIB_VISIBLE, \ + ZSTDLIB_HIDDEN, \ ZSTD_DLL_EXPORT, \ ZSTDLIB_STATIC_API, \ ZSTD_DISABLE_DEPRECATE_WARNINGS, \ ZSTD_CLEVEL_DEFAULT, \ - /* zdict.h */ ZDICT_STATIC_LINKING_ONLY, \ + /* zdict.h */ \ + ZDICT_STATIC_LINKING_ONLY, \ + ZDICTLIB_VISIBLE, \ + ZDICTLIB_HIDDEN, \ ZDICTLIB_VISIBILITY, \ + ZDICTLIB_STATIC_API, \ ZDICT_DISABLE_DEPRECATE_WARNINGS, \ - /* zstd_errors.h */ ZSTDERRORLIB_VISIBILITY + /* zstd_errors.h */ \ + ZSTDERRORLIB_VISIBLE, \ + ZSTDERRORLIB_HIDDEN, \ + ZSTDERRORLIB_VISIBILITY module dictbuilder [extern_c] { header "zdict.h" diff --git a/sys/contrib/zstd/lib/zdict.h b/sys/contrib/zstd/lib/zdict.h --- a/sys/contrib/zstd/lib/zdict.h +++ b/sys/contrib/zstd/lib/zdict.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -8,32 +8,43 @@ * You may select, at your option, one of the above-listed licenses. 
*/ -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - #if defined (__cplusplus) extern "C" { #endif +#ifndef ZSTD_ZDICT_H +#define ZSTD_ZDICT_H /*====== Dependencies ======*/ #include <stddef.h> /* size_t */ /* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZDICTLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZDICTLIB_VISIBILITY +# define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default"))) +# else +# define ZDICTLIB_VISIBLE +# endif +#endif + +#ifndef ZDICTLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else -# define ZDICTLIB_VISIBILITY +# define ZDICTLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY +# define ZDICTLIB_API ZDICTLIB_VISIBLE #endif /******************************************************************************* @@ -110,8 +121,8 @@ * The zstd CLI defaults to a 110KB dictionary. You likely don't need a * dictionary larger than that. But, most use cases can get away with a * smaller dictionary. The advanced dictionary builders can automatically - * shrink the dictionary for you, and select a the smallest size that - * doesn't hurt compression ratio too much. See the `shrinkDict` parameter. + * shrink the dictionary for you, and select the smallest size that doesn't + * hurt compression ratio too much. See the `shrinkDict` parameter. * A smaller dictionary can save memory, and potentially speed up * compression. * @@ -201,9 +212,9 @@ const size_t* samplesSizes, unsigned nbSamples); typedef struct { - int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) + int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value) * NOTE: The zstd format reserves some dictionary IDs for future use. * You may use them in private settings, but be warned that they * may be used by zstd in a public dictionary registry in the future. 
@@ -260,9 +271,21 @@ ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); +#endif /* ZSTD_ZDICT_H */ +#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC) +#define ZSTD_ZDICT_H_STATIC -#ifdef ZDICT_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZDICTLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE +# else +# define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE +# endif +#endif /* ==================================================================================== * The definitions in this section are considered experimental. @@ -318,7 +341,7 @@ * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); @@ -340,7 +363,7 @@ * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_cover_params_t* parameters); @@ -361,7 +384,7 @@ * In general, it's recommended to provide a few thousands samples, though this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t parameters); @@ -384,7 +407,7 @@ * See ZDICT_trainFromBuffer() for details on failure modes. * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, +ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_fastCover_params_t* parameters); @@ -409,7 +432,7 @@ * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( +ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy( void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); @@ -421,32 +444,31 @@ or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +# define ZDICT_DEPRECATED(message) /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] # elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# define ZDICT_DEPRECATED(message) __attribute__((deprecated)) # elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# define ZDICT_DEPRECATED(message) __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# define ZDICT_DEPRECATED(message) # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +ZDICTLIB_STATIC_API size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); -#endif /* ZDICT_STATIC_LINKING_ONLY */ +#endif /* ZSTD_ZDICT_H_STATIC */ #if defined (__cplusplus) } #endif - -#endif /* DICTBUILDER_H_001 */ diff --git a/sys/contrib/zstd/lib/zstd.h b/sys/contrib/zstd/lib/zstd.h --- a/sys/contrib/zstd/lib/zstd.h +++ b/sys/contrib/zstd/lib/zstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -14,21 +14,31 @@ #ifndef ZSTD_H_235446 #define ZSTD_H_235446 -/* ====== Dependency ======*/ +/* ====== Dependencies ======*/ #include <limits.h> /* INT_MAX */ #include <stddef.h> /* size_t */ /* ===== ZSTDLIB_API : control library symbols visibility ===== */ #ifndef ZSTDLIB_VISIBLE -# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) + /* Backwards compatibility with old macro name */ +# ifdef ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) # define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) -# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else # define ZSTDLIB_VISIBLE +# endif +#endif + +#ifndef ZSTDLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else # define ZSTDLIB_HIDDEN # endif #endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) @@ -37,6 +47,28 @@ # define ZSTDLIB_API ZSTDLIB_VISIBLE #endif +/* Deprecation warnings : + * Should these warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. 
+ */ +#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS +# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZSTD_DEPRECATED(message) [[deprecated(message)]] +# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ >= 3) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZSTD_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") +# define ZSTD_DEPRECATED(message) +# endif +#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ + /******************************************************************************* Introduction @@ -74,7 +106,7 @@ /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 #define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 2 +#define ZSTD_VERSION_RELEASE 5 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : @@ -116,7 +148,8 @@ ***************************************/ /*! ZSTD_compress() : * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, @@ -165,7 +198,9 @@ * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ -ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); +ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize") +ZSTDLIB_API +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ * `src` should point to the start of a ZSTD frame or skippable frame. @@ -177,8 +212,30 @@ /*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ +/* ZSTD_compressBound() : + * maximum compressed size in worst case single-pass scenario. + * When invoking `ZSTD_compress()` or any other one-pass compression function, + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + * as it eliminates one potential failure scenario, + * aka not enough room in dst buffer to write the compressed frame. + * Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE . + * In which case, ZSTD_compressBound() will return an error code + * which can be tested using ZSTD_isError(). + * + * ZSTD_COMPRESSBOUND() : + * same as ZSTD_compressBound(), but as a macro. 
+ * It can be used to produce constants, which can be useful for static allocation, + * for example to size a static array on stack. + * Will produce constant value 0 if srcSize too large. + */ +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U) +#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ +/* ZSTD_isError() : + * Most ZSTD_* functions returning a size_t value can be tested for error, + * using ZSTD_isError(). + * @return 1 if error, 0 otherwise + */ ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ @@ -421,6 +478,9 @@ * ZSTD_c_validateSequences * ZSTD_c_useBlockSplitter * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback + * ZSTD_c_maxBlockSize * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enums values themselves are unstable and can still change. @@ -439,7 +499,11 @@ ZSTD_c_experimentalParam12=1009, ZSTD_c_experimentalParam13=1010, ZSTD_c_experimentalParam14=1011, - ZSTD_c_experimentalParam15=1012 + ZSTD_c_experimentalParam15=1012, + ZSTD_c_experimentalParam16=1013, + ZSTD_c_experimentalParam17=1014, + ZSTD_c_experimentalParam18=1015, + ZSTD_c_experimentalParam19=1016 } ZSTD_cParameter; typedef struct { @@ -502,7 +566,7 @@ * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. + * This also removes any reference to any dictionary or external sequence producer. * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. @@ -515,7 +579,8 @@ * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - The function is always blocking, returns when compression is completed. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have + * enough space to successfully compress the data, though it is possible it fails for other reasons. * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ @@ -552,13 +617,15 @@ * ZSTD_d_stableOutBuffer * ZSTD_d_forceIgnoreChecksum * ZSTD_d_refMultipleDDicts + * ZSTD_d_disableHuffmanAssembly * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. 
* note : never ever use experimentalParam? names directly */ ZSTD_d_experimentalParam1=1000, ZSTD_d_experimentalParam2=1001, ZSTD_d_experimentalParam3=1002, - ZSTD_d_experimentalParam4=1003 + ZSTD_d_experimentalParam4=1003, + ZSTD_d_experimentalParam5=1004 } ZSTD_dParameter; @@ -737,8 +804,6 @@ * This following is a legacy streaming API, available since v1.0+ . * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. - * Streaming in combination with advanced parameters and dictionary compression - * can only be used through the new API. ******************************************************************************/ /*! @@ -747,6 +812,9 @@ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); + * + * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API + * to compress with a dictionary. */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! @@ -797,13 +865,31 @@ /*===== Streaming decompression functions =====*/ -/* This function is redundant with the advanced API and equivalent to: +/*! ZSTD_initDStream() : + * Initialize/reset DStream state for new decompression operation. + * Call before new decompression operation using same DStream. * + * Note : This function is redundant with the advanced API and equivalent to: * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * ZSTD_DCtx_refDDict(zds, NULL); */ ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); +/*! ZSTD_decompressStream() : + * Streaming decompression function. + * Call repetitively to consume full input updating it as necessary. + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. + * - `output.pos < output.size`, decoder finished and flushed all remaining buffers. + * - `output.pos == output.size`, potentially uncflushed data present in the internal buffers, + * call ZSTD_decompressStream() again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + * + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is some decoding or flushing to do to complete current frame. + */ ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ @@ -922,7 +1008,7 @@ * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). * - This is not a Zstandard frame. 
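For illustration, a minimal usage sketch of the dictID handling documented above (not part of the imported sources): ZSTD_getDictID_fromFrame(), ZSTD_DCtx_loadDictionary(), ZSTD_decompressDCtx(), ZSTD_createDCtx() and ZSTD_freeDCtx() are public zstd APIs, while lookup_dictionary() is a hypothetical application-side helper assumed to map a dictID to a dictionary buffer.

#include <stddef.h>
#include <zstd.h>

/* Hypothetical application helper (assumption, not part of zstd):
 * returns the dictionary registered under `dictID`, or NULL if unknown. */
const void* lookup_dictionary(unsigned dictID, size_t* dictSize);

static size_t decompress_with_dict(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    size_t result;
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx == NULL) return (size_t)-1;   /* allocation failure */

    /* 0 means "no dictID found": the frame may simply not need a dictionary,
     * or the ID was intentionally stripped at compression time. */
    {   unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
        if (dictID != 0) {
            size_t dictSize;
            const void* const dict = lookup_dictionary(dictID, &dictSize);
            if (dict != NULL)
                ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);  /* sticky until reset */
        }
    }
    result = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeDCtx(dctx);
    return result;   /* check with ZSTD_isError() */
}

Error handling is elided for brevity; each zstd call above returns a size_t that should be checked with ZSTD_isError() in real code.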
@@ -934,9 +1020,11 @@ * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and - * only reset with the context is reset with ZSTD_reset_parameters or - * ZSTD_reset_session_and_parameters. Prefixes are single-use. + * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). + * Dictionaries are sticky, they remain valid when same context is re-used, + * they only reset when the context is reset + * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters. + * In contrast, Prefixes are single-use. ******************************************************************************/ @@ -946,8 +1034,9 @@ * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). + * Note 1 : Dictionary is sticky, it will be used for all future compressed frames, + * until parameters are reset, a new dictionary is loaded, or the dictionary + * is explicitly invalidated by loading a NULL dictionary. * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. * Tables are dependent on compression parameters, and for this reason, @@ -956,11 +1045,15 @@ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. * In such a case, dictionary buffer must outlive its users. * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. */ + * to precisely select how dictionary content must be interpreted. + * Note 5 : This method does not benefit from LDM (long distance mode). + * If you want to employ LDM on some large dictionary content, + * prefer employing ZSTD_CCtx_refPrefix() described below. + */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used for all next compressed frames. + * Reference a prepared dictionary, to be used for all future compressed frames. * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. @@ -979,6 +1072,7 @@ * Decompression will need same prefix to properly regenerate data. * Compressing with a prefix is similar in outcome as performing a diff and compressing it, * but performs much faster, especially during decompression (compression speed is tunable with compression level). + * This method is compatible with LDM (long distance mode). * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary * Note 1 : Prefix buffer is referenced. It **must** outlive compression. @@ -995,9 +1089,9 @@ const void* prefix, size_t prefixSize); /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. - * The dictionary remains valid for all future frames, until explicitly invalidated. 
+ * Create an internal DDict from dict buffer, to be used to decompress all future frames. + * The dictionary remains valid for all future frames, until explicitly invalidated, or + * a new dictionary is loaded. * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". @@ -1021,9 +1115,10 @@ * The memory for the table is allocated on the first call to refDDict, and can be * freed with ZSTD_freeDCtx(). * + * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary + * will be managed, and referencing a dictionary effectively "discards" any previous one. + * * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ @@ -1086,28 +1181,6 @@ # endif #endif -/* Deprecation warnings : - * Should these warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. - * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. - */ -#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API -# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ >= 3) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API -# endif -#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ - /**************************************************************************************** * experimental API (static linking only) **************************************************************************************** @@ -1142,6 +1215,7 @@ #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 +#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */ #define ZSTD_OVERLAPLOG_MIN 0 @@ -1322,7 +1396,7 @@ } ZSTD_paramSwitch_e; /*************************************** -* Frame size functions +* Frame header and size functions ***************************************/ /*! ZSTD_findDecompressedSize() : @@ -1369,33 +1443,113 @@ * or an error code (if srcSize is too small) */ ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; +typedef struct { + unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 
0 means "empty" */ + unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ + unsigned blockSizeMax; + ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + unsigned headerSize; + unsigned dictID; + unsigned checksumFlag; + unsigned _reserved1; + unsigned _reserved2; +} ZSTD_frameHeader; + +/*! ZSTD_getFrameHeader() : + * decode Frame Header, or requires larger `srcSize`. + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ +/*! ZSTD_getFrameHeader_advanced() : + * same as ZSTD_getFrameHeader(), + * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); + +/*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. + * In this case, the output buffer must be at least (Margin + Output_Size) bytes large, + * and the input buffer must be at the end of the output buffer. + * + * _______________________ Output Buffer ________________________ + * | | + * | ____ Input Buffer ____| + * | | | + * v v v + * |---------------------------------------|-----------|----------| + * ^ ^ ^ + * |___________________ Output_Size ___________________|_ Margin _| + * + * NOTE: See also ZSTD_DECOMPRESSION_MARGIN(). + * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or + * ZSTD_decompressDCtx(). + * NOTE: This function supports multi-frame input. + * + * @param src The compressed frame(s) + * @param srcSize The size of the compressed frame(s) + * @returns The decompression margin or an error that can be checked with ZSTD_isError(). + */ +ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize); + +/*! ZSTD_DECOMPRESSION_MARGIN() : + * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from + * the compressed frame, compute it from the original size and the block size. + * See ZSTD_decompressionMargin() for details. + * + * WARNING: This macro does not support multi-frame input; the input must be a single + * zstd frame. If you need that support use the function, or implement it yourself. + * + * @param originalSize The original uncompressed size of the data. + * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX). + * Unless you explicitly set the windowLog smaller than + * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX. + */ +#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \ + ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \ + 4 /* checksum */ + \ + ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \ + (blockSize) /* One block of margin */ \ + )) + typedef enum { ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ } ZSTD_sequenceFormat_e; +/*!
ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated + * from a buffer of srcSize bytes + * + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ +ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize); + /*! ZSTD_generateSequences() : - * Generate sequences using ZSTD_compress2, given a source buffer. + * Generate sequences using ZSTD_compress2(), given a source buffer. * * Each block will end with a dummy sequence * with offset == 0, matchLength == 0, and litLength == length of last literals. * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) * simply acts as a block delimiter. * - * zc can be used to insert custom compression params. - * This function invokes ZSTD_compress2 + * @zc can be used to insert custom compression params. + * This function invokes ZSTD_compress2(). * * The output of this function can be fed into ZSTD_compressSequences() with CCtx * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters * @return : number of sequences generated */ -ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, - size_t outSeqsSize, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t +ZSTD_generateSequences( ZSTD_CCtx* zc, + ZSTD_Sequence* outSeqs, size_t outSeqsSize, + const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - * by merging them into into the literals of the next sequence. + * by merging them into the literals of the next sequence. * * As such, the final generated result has no explicit representation of block boundaries, * and the final last literals segment is not represented in the sequences. @@ -1407,7 +1561,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); /*! ZSTD_compressSequences() : - * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. + * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + * @src contains the entire input (not just the literals). + * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) * The entire source is compressed into a single frame. * @@ -1432,17 +1588,18 @@ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, * and cannot emit an RLE block that disagrees with the repcode history - * @return : final compressed size or a ZSTD error. + * @return : final compressed size, or a ZSTD error code. */ -ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t +ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize); /*! ZSTD_writeSkippableFrame() : * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. * - * Skippable frames begin with a a 4-byte magic number. 
There are 16 possible choices of magic number, + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. @@ -1500,8 +1657,11 @@ * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. * - * Note 2 : only single-threaded compression is supported. + * Note : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * + * Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. */ ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); @@ -1520,7 +1680,12 @@ * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize */ + * In this case, get total size by adding ZSTD_estimate?DictSize + * Note 2 : only single-threaded compression is supported. + * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. + * Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time. + * Size estimates assume that no external sequence producer is registered. + */ ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel); ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); @@ -1670,22 +1835,45 @@ * This function never fails (wide contract) */ ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); +/*! ZSTD_CCtx_setCParams() : + * Set all parameters provided within @p cparams into the working @p cctx. + * Note : if modifying parameters during compression (MT mode only), + * note that changes to the .windowLog parameter will be ignored. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + * On failure, no parameters are updated. + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams); + +/*! ZSTD_CCtx_setFParams() : + * Set all parameters provided within @p fparams into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams); + +/*! ZSTD_CCtx_setParams() : + * Set all parameters provided within @p params into the working @p cctx. + * @return 0 on success, or an error code (can be checked with ZSTD_isError()). + */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params); + /*! 
ZSTD_compress_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_compress2") +ZSTDLIB_STATIC_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params); + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params); /*! ZSTD_compress_usingCDict_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary") +ZSTDLIB_STATIC_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, @@ -1829,13 +2017,16 @@ * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * - * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same - * between calls, except for the modifications that zstd makes to pos (the - * caller must not modify pos). This is checked by the compressor, and - * compression will fail if it ever changes. This means the only flush - * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end - * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos) - * MUST not be modified during compression or you will get data corruption. + * Tells the compressor that input data presented with ZSTD_inBuffer + * will ALWAYS be the same between calls. + * Technically, the @src pointer must never be changed, + * and the @pos field can only be updated by zstd. + * However, it's possible to increase the @size field, + * allowing scenarios where more data can be appended after compression starts. + * These conditions are checked by the compressor, + * and compression will fail if they are not respected. + * Also, data in the ZSTD_inBuffer within the range [src, src + pos) + * MUST not be modified during compression or it will result in data corruption. * * When this flag is enabled zstd won't allocate an input window buffer, * because the user guarantees it can reference the ZSTD_inBuffer until @@ -1843,18 +2034,15 @@ * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also * avoid the memcpy() from the input buffer to the input window buffer. * - * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used. - * That means this flag cannot be used with ZSTD_compressStream(). - * * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using * this flag is ALWAYS memory safe, and will never access out-of-bounds - * memory. However, compression WILL fail if you violate the preconditions. + * memory. However, compression WILL fail if conditions are not respected. * - * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST - * not be modified during compression or you will get data corruption. This - * is because zstd needs to reference data in the ZSTD_inBuffer to find + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST + * not be modified during compression or it will result in data corruption. + * This is because zstd needs to reference data in the ZSTD_inBuffer to find + * matches.
Normally zstd maintains its own window buffer for this purpose, - * but passing this flag tells zstd to use the user provided buffer. + * but passing this flag tells zstd to rely on user provided buffer instead. */ #define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9 @@ -1899,7 +2087,7 @@ * Without validation, providing a sequence that does not conform to the zstd spec will cause * undefined behavior, and may produce a corrupted block. * - * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for + * With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for * specifics regarding offset/matchlength requirements) then the function will bail out and * return an error. * @@ -1949,6 +2137,79 @@ */ #define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 +/* ZSTD_c_prefetchCDictTables + * Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto. + * + * In some situations, zstd uses CDict tables in-place rather than copying them + * into the working context. (See docs on ZSTD_dictAttachPref_e above for details). + * In such situations, compression speed is seriously impacted when CDict tables are + * "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables + * when they are used in-place. + * + * For sufficiently small inputs, the cost of the prefetch will outweigh the benefit. + * For sufficiently large inputs, zstd will by default memcpy() CDict tables + * into the working context, so there is no need to prefetch. This parameter is + * targeted at a middle range of input sizes, where a prefetch is cheap enough to be + * useful but memcpy() is too expensive. The exact range of input sizes where this + * makes sense is best determined by careful experimentation. + * + * Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable, + * but in the future zstd may conditionally enable this feature via an auto-detection + * heuristic for cold CDicts. + * Use ZSTD_ps_disable to opt out of prefetching under any circumstances. + */ +#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16 + +/* ZSTD_c_enableSeqProducerFallback + * Allowed values are 0 (disable) and 1 (enable). The default setting is 0. + * + * Controls whether zstd will fall back to an internal sequence producer if an + * external sequence producer is registered and returns an error code. This fallback + * is block-by-block: the internal sequence producer will only be called for blocks + * where the external sequence producer returns an error code. Fallback parsing will + * follow any other cParam settings, such as compression level, the same as in a + * normal (fully-internal) compression operation. + * + * The user is strongly encouraged to read the full Block-Level Sequence Producer API + * documentation (below) before setting this parameter. */ +#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17 + +/* ZSTD_c_maxBlockSize + * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB). + * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default. + * + * This parameter can be used to set an upper bound on the blocksize + * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper + * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make + * compressBound() inaccurate). Only currently meant to be used for testing. 
+ * + */ +#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18 + +/* ZSTD_c_searchForExternalRepcodes + * This parameter affects how zstd parses external sequences, such as sequences + * provided through the compressSequences() API or from an external block-level + * sequence producer. + * + * If set to ZSTD_ps_enable, the library will check for repeated offsets in + * external sequences, even if those repcodes are not explicitly indicated in + * the "rep" field. Note that this is the only way to exploit repcode matches + * while using compressSequences() or an external sequence producer, since zstd + * currently ignores the "rep" field of external sequences. + * + * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in + * external sequences, regardless of whether the "rep" field has been set. This + * reduces sequence compression overhead by about 25% while sacrificing some + * compression ratio. + * + * The default value is ZSTD_ps_auto, for which the library will enable/disable + * based on compression level. + * + * Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is + * set to ZSTD_sf_explicitBlockDelimiters. That may change in the future. + */ +#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. @@ -2105,7 +2366,7 @@ * in the range [dst, dst + pos) MUST not be modified during decompression * or you will get data corruption. * - * When this flags is enabled zstd won't allocate an output buffer, because + * When this flag is enabled zstd won't allocate an output buffer, because * it can write directly to the ZSTD_outBuffer, but it will still allocate * an input buffer large enough to fit any compressed block. This will also * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. @@ -2158,6 +2419,17 @@ */ #define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 +/* ZSTD_d_disableHuffmanAssembly + * Set to 1 to disable the Huffman assembly implementation. + * The default value is 0, which allows zstd to use the Huffman assembly + * implementation if available. + * + * This parameter can be used to disable Huffman assembly at runtime. + * If you want to disable it at compile time you can define the macro + * ZSTD_DISABLE_ASM. + */ +#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5 + /*! ZSTD_DCtx_setFormat() : * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). @@ -2166,6 +2438,7 @@ * such ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : @@ -2202,6 +2475,7 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); @@ -2219,17 +2493,15 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /*! 
ZSTD_initCStream_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd parameter and leave the rest as-is. - * for ((param, value) : params) { - * ZSTD_CCtx_setParameter(zcs, param, value); - * } + * ZSTD_CCtx_setParams(zcs, params); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * @@ -2239,6 +2511,7 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, @@ -2253,15 +2526,13 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /*! ZSTD_initCStream_usingCDict_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. - * for ((fParam, value) : fParams) { - * ZSTD_CCtx_setParameter(zcs, fParam, value); - * } + * ZSTD_CCtx_setFParams(zcs, fParams); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * @@ -2271,6 +2542,7 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, @@ -2295,6 +2567,7 @@ * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +ZSTDLIB_STATIC_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); @@ -2340,8 +2613,8 @@ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! @@ -2351,8 +2624,8 @@ * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive decompression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! 
@@ -2361,17 +2634,185 @@ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * * re-use decompression parameters from previous init; saves dictionary loading - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ +ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions") ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API ********************* + * + * *** OVERVIEW *** + * The Block-Level Sequence Producer API allows users to provide their own custom + * sequence producer which libzstd invokes to process each block. The produced list + * of sequences (literals and matches) is then post-processed by libzstd to produce + * valid compressed blocks. + * + * This block-level offload API is a more granular complement of the existing + * frame-level offload API compressSequences() (introduced in v1.5.1). It offers + * an easier migration story for applications already integrated with libzstd: the + * user application continues to invoke the same compression functions + * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits + * from the specific advantages of the external sequence producer. For example, + * the sequence producer could be tuned to take advantage of known characteristics + * of the input, to offer better speed / ratio, or could leverage hardware + * acceleration not available within libzstd itself. + * + * See contrib/externalSequenceProducer for an example program employing the + * Block-Level Sequence Producer API. + * + * *** USAGE *** + * The user is responsible for implementing a function of type + * ZSTD_sequenceProducer_F. For each block, zstd will pass the following + * arguments to the user-provided function: + * + * - sequenceProducerState: a pointer to a user-managed state for the sequence + * producer. + * + * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer. + * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory + * backing outSeqs is managed by the CCtx. + * + * - src, srcSize: an input buffer for the sequence producer to parse. + * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX. + * + * - dict, dictSize: a history buffer, which may be empty, which the sequence + * producer may reference as it parses the src buffer. Currently, zstd will + * always pass dictSize == 0 into external sequence producers, but this will + * change in the future. + * + * - compressionLevel: a signed integer representing the zstd compression level + * set by the user for the current operation. The sequence producer may choose + * to use this information to change its compression strategy and speed/ratio + * tradeoff. Note: the compression level does not reflect zstd parameters set + * through the advanced API. + * + * - windowSize: a size_t representing the maximum allowed offset for external + * sequences. Note that sequence offsets are sometimes allowed to exceed the + * windowSize if a dictionary is present, see doc/zstd_compression_format.md + * for details. + * + * The user-provided function shall return a size_t representing the number of + * sequences written to outSeqs. This return value will be treated as an error + * code if it is greater than outSeqsCapacity. The return value must be non-zero + * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided + * for convenience, but any value greater than outSeqsCapacity will be treated as + * an error code. 
+ * + * If the user-provided function does not return an error code, the sequences + * written to outSeqs must be a valid parse of the src buffer. Data corruption may + * occur if the parse is not valid. A parse is defined to be valid if the + * following conditions hold: + * - The sum of matchLengths and literalLengths must equal srcSize. + * - All sequences in the parse, except for the final sequence, must have + * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have + * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0. + * - All offsets must respect the windowSize parameter as specified in + * doc/zstd_compression_format.md. + * - If the final sequence has matchLength == 0, it must also have offset == 0. + * + * zstd will only validate these conditions (and fail compression if they do not + * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence + * validation has a performance cost. + * + * If the user-provided function returns an error, zstd will either fall back + * to an internal sequence producer or fail the compression operation. The user can + * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback + * cParam. Fallback compression will follow any other cParam settings, such as + * compression level, the same as in a normal compression operation. + * + * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F + * function by calling + * ZSTD_registerSequenceProducer(cctx, + * sequenceProducerState, + * sequenceProducer) + * This setting will persist until the next parameter reset of the CCtx. + * + * The sequenceProducerState must be initialized by the user before calling + * ZSTD_registerSequenceProducer(). The user is responsible for destroying the + * sequenceProducerState. + * + * *** LIMITATIONS *** + * This API is compatible with all zstd compression APIs which respect advanced parameters. + * However, there are three limitations: + * + * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported. + * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level + * external sequence producer. + * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some + * cases (see its documentation for details). Users must explicitly set + * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external + * sequence producer is registered. + * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default + * whenever ZSTD_c_windowLog < 128MB, but that's subject to change. Users should + * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence + * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog). + * + * Second, history buffers are not currently supported. Concretely, zstd will always pass + * dictSize == 0 to the external sequence producer (for now). This has two implications: + * - Dictionaries are not currently supported. Compression will *not* fail if the user + * references a dictionary, but the dictionary won't have any effect. + * - Stream history is not currently supported. All advanced compression APIs, including + * streaming APIs, work with external sequence producers, but each block is treated as + * an independent chunk without history from previous blocks. + * + * Third, multi-threading within a single compression is not currently supported. 
In other words, + * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered. + * Multi-threading across compressions is fine: simply create one CCtx per thread. + * + * Long-term, we plan to overcome all three limitations. There is no technical blocker to + * overcoming them. It is purely a question of engineering effort. + */ + +#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1)) + +typedef size_t ZSTD_sequenceProducer_F ( + void* sequenceProducerState, + ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int compressionLevel, + size_t windowSize +); + +/*! ZSTD_registerSequenceProducer() : + * Instruct zstd to use a block-level external sequence producer function. + * + * The sequenceProducerState must be initialized by the caller, and the caller is + * responsible for managing its lifetime. This parameter is sticky across + * compressions. It will remain set until the user explicitly resets compression + * parameters. + * + * Sequence producer registration is considered to be an "advanced parameter", + * part of the "advanced API". This means it will only have an effect on compression + * APIs which respect advanced parameters, such as compress2() and compressStream2(). + * Older compression APIs such as compressCCtx(), which predate the introduction of + * "advanced parameters", will ignore any external sequence producer setting. + * + * The sequence producer can be "cleared" by registering a NULL function pointer. This + * removes all limitations described above in the "LIMITATIONS" section of the API docs. + * + * The user is strongly encouraged to read the full API documentation (above) before + * calling this function. */ +ZSTDLIB_STATIC_API void +ZSTD_registerSequenceProducer( + ZSTD_CCtx* cctx, + void* sequenceProducerState, + ZSTD_sequenceProducer_F* sequenceProducer +); + + /********************************************************************* -* Buffer-less and synchronous inner streaming functions +* Buffer-less and synchronous inner streaming functions (DEPRECATED) +* +* This API is deprecated, and will be removed in a future version. +* It allows streaming (de)compression with user-allocated buffers. +* However, it is hard to use, and not as well tested as the rest of +* our API. * -* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. -* But it's also a complex one, with several restrictions, documented below. -* Prefer normal streaming API for an easier experience. +* Please use the normal streaming API instead: ZSTD_compressStream2, +* and ZSTD_decompressStream. +* If there is functionality that you need that it doesn't provide, +* please open an issue on our GitHub. ********************************************************************* */ /** @@ -2383,7 +2824,6 @@ Start by initializing a context. Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : @@ -2405,18 +2845,28 @@ */ /*===== Buffer-less streaming compression functions =====*/ +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API.
See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.") +ZSTDLIB_STATIC_API +size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ ZSTD_DEPRECATED("use advanced API to access custom parameters") +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ /** Buffer-less streaming decompression (synchronous mode) @@ -2429,8 +2879,8 @@ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. Data fragment must be large enough to ensure successful decoding. `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. + result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. + >0 : `srcSize` is too small, please provide at least result bytes on next attempt. errorCode, which can be tested using ZSTD_isError(). It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, @@ -2449,7 +2899,7 @@ The most memory efficient way is to use a round buffer of sufficient size. Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). 
+ which can return an error code if required value is too large for current system (in 32-bits mode). In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. @@ -2469,7 +2919,7 @@ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). + result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). @@ -2492,27 +2942,7 @@ */ /*===== Buffer-less streaming decompression functions =====*/ -typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; -typedef struct { - unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ - unsigned blockSizeMax; - ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - unsigned headerSize; - unsigned dictID; - unsigned checksumFlag; -} ZSTD_frameHeader; -/*! ZSTD_getFrameHeader() : - * decode Frame Header, or requires larger `srcSize`. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ -/*! ZSTD_getFrameHeader_advanced() : - * same as ZSTD_getFrameHeader(), - * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); @@ -2523,6 +2953,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ +ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.") ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); @@ -2530,11 +2961,23 @@ -/* ============================ */ -/** Block level API */ -/* ============================ */ +/* ========================================= */ +/** Block level API (DEPRECATED) */ +/* ========================================= */ /*! + + This API is deprecated in favor of the regular compression API. 
+ You can get the frame header down to 2 bytes by setting: + - ZSTD_c_format = ZSTD_f_zstd1_magicless + - ZSTD_c_contentSizeFlag = 0 + - ZSTD_c_checksumFlag = 0 + - ZSTD_c_dictIDFlag = 0 + + This API is not as well tested as our normal API, so we recommend not using it. + We will be removing it in a future version. If the normal API doesn't provide + the functionality you need, please open a GitHub issue. + Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. @@ -2545,7 +2988,6 @@ - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. @@ -2562,12 +3004,15 @@ */ /*===== Raw zstd block functions =====*/ +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ - #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ #if defined (__cplusplus) diff --git a/sys/contrib/zstd/lib/zstd_errors.h b/sys/contrib/zstd/lib/zstd_errors.h --- a/sys/contrib/zstd/lib/zstd_errors.h +++ b/sys/contrib/zstd/lib/zstd_errors.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -20,19 +20,31 @@ /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDERRORLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZSTDERRORLIB_VISIBLE + /* Backwards compatibility with old macro name */ +# ifdef ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY +# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default"))) # else -# define ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_VISIBLE # endif #endif + +#ifndef ZSTDERRORLIB_HIDDEN +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZSTDERRORLIB_HIDDEN +# endif +#endif + #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE #endif /*-********************************************* @@ -58,14 +70,17 @@ ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, + ZSTD_error_literals_headerWrong = 24, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_combination_unsupported = 41, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, @@ -73,11 +88,15 @@ ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, + ZSTD_error_noForwardProgress_destFull = 80, + ZSTD_error_noForwardProgress_inputEmpty = 82, /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, ZSTD_error_dstBuffer_wrong = 104, ZSTD_error_srcBuffer_wrong = 105, + ZSTD_error_sequenceProducer_failed = 106, + ZSTD_error_externalSequences_invalid = 107, ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; diff --git a/sys/contrib/zstd/programs/Makefile b/sys/contrib/zstd/programs/Makefile --- a/sys/contrib/zstd/programs/Makefile +++ b/sys/contrib/zstd/programs/Makefile @@ -1,5 +1,5 @@ # ################################################################ -# Copyright (c) Yann Collet, Facebook, Inc. +# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. 
# # This source code is licensed under both the BSD-style license (found in the @@ -129,10 +129,10 @@ .PHONY: all -all: zstd +all: zstd zstd-compress zstd-decompress zstd-small .PHONY: allVariants -allVariants: zstd zstd-compress zstd-decompress zstd-small zstd-frugal zstd-nolegacy zstd-dictBuilder +allVariants: all zstd-frugal zstd-nolegacy zstd-dictBuilder .PHONY: zstd # must always be run zstd : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP) $(LZMACPP) $(LZ4CPP) @@ -180,11 +180,13 @@ endif # BUILD_DIR +CLEAN += zstd .PHONY: zstd-release zstd-release: DEBUGFLAGS := -DBACKTRACE_ENABLE=0 zstd-release: DEBUGFLAGS_LD := zstd-release: zstd +CLEAN += zstd32 zstd32 : CPPFLAGS += $(THREAD_CPP) zstd32 : LDFLAGS += $(THREAD_LD) zstd32 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) @@ -194,7 +196,8 @@ zstd32 : $(ZSTDLIB_FULL_SRC) $(ZSTD_CLI_SRC) $(CC) -m32 $(FLAGS) $^ -o $@$(EXT) -## zstd-nolegacy: same scope as zstd, with just support of legacy formats removed +## zstd-nolegacy: same scope as zstd, with removed support of legacy formats +CLEAN += zstd-nolegacy zstd-nolegacy : LDFLAGS += $(THREAD_LD) $(ZLIBLD) $(LZMALD) $(LZ4LD) $(DEBUGFLAGS_LD) zstd-nolegacy : CPPFLAGS += -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 zstd-nolegacy : $(ZSTDLIB_CORE_SRC) $(ZDICT_SRC) $(ZSTD_CLI_OBJ) @@ -222,40 +225,52 @@ .PHONY: zstd-dll zstd-dll : LDFLAGS+= -L$(LIBZSTD) zstd-dll : LDLIBS += -lzstd -zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c +zstd-dll : ZSTDLIB_LOCAL_SRC = xxhash.c pool.c threading.c zstd-dll : zstd ## zstd-pgo: zstd executable optimized with PGO. .PHONY: zstd-pgo +zstd-pgo : LLVM_PROFDATA?=llvm-profdata +zstd-pgo : PROF_GENERATE_FLAGS=-fprofile-generate $(if $(findstring gcc,$(CC)),-fprofile-dir=.) +zstd-pgo : PROF_USE_FLAGS=-fprofile-use $(if $(findstring gcc,$(CC)),-fprofile-dir=. -Werror=missing-profile -Wno-error=coverage-mismatch) zstd-pgo : - $(MAKE) clean - $(MAKE) zstd MOREFLAGS=-fprofile-generate + $(MAKE) clean HASH_DIR=$(HASH_DIR) + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_GENERATE_FLAGS)" ./zstd -b19i1 $(PROFILE_WITH) ./zstd -b16i1 $(PROFILE_WITH) ./zstd -b9i2 $(PROFILE_WITH) ./zstd -b $(PROFILE_WITH) ./zstd -b7i2 $(PROFILE_WITH) ./zstd -b5 $(PROFILE_WITH) - $(RM) zstd *.o - case $(CC) in *clang*) if ! [ -e default.profdata ]; then llvm-profdata merge -output=default.profdata default*.profraw; fi ;; esac - $(MAKE) zstd MOREFLAGS=-fprofile-use +ifndef BUILD_DIR + $(RM) zstd obj/$(HASH_DIR)/zstd obj/$(HASH_DIR)/*.o +else + $(RM) zstd $(BUILD_DIR)/zstd $(BUILD_DIR)/*.o +endif + case $(CC) in *clang*) if ! [ -e default.profdata ]; then $(LLVM_PROFDATA) merge -output=default.profdata default*.profraw; fi ;; esac + $(MAKE) zstd HASH_DIR=$(HASH_DIR) MOREFLAGS="$(PROF_USE_FLAGS)" ## zstd-small: minimal target, supporting only zstd compression and decompression. no bench. no legacy. no other format. 
-zstd-small: CFLAGS = -Os -s -zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-small zstd-frugal +zstd-small: CFLAGS = -Os -Wl,-s +zstd-frugal zstd-small: $(ZSTDLIB_CORE_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-decompress +zstd-decompress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_DECOMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) -zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c +CLEAN += zstd-compress +zstd-compress: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS -DZSTD_NOTRACE -UZSTD_LEGACY_SUPPORT -DZSTD_LEGACY_SUPPORT=0 $^ -o $@$(EXT) ## zstd-dictBuilder: executable supporting dictionary creation and compression (only) -zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c dibio.c +CLEAN += zstd-dictBuilder +zstd-dictBuilder: $(ZSTDLIB_COMMON_SRC) $(ZSTDLIB_COMPRESS_SRC) $(ZDICT_SRC) zstdcli.c util.c timefn.c fileio.c fileio_asyncio.c dibio.c $(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODECOMPRESS -DZSTD_NOTRACE $^ -o $@$(EXT) +CLEAN += zstdmt zstdmt: zstd ln -sf zstd zstdmt @@ -264,7 +279,7 @@ ifneq (,$(filter Windows%,$(OS))) RC ?= windres -# http://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable +# https://stackoverflow.com/questions/708238/how-do-i-add-an-icon-to-a-mingw-gcc-compiled-executable $(RES64_FILE): windres/zstd.rc $(RC) -o $@ -I ../lib -I windres -i $< -O coff -F pe-x86-64 $(RES32_FILE): windres/zstd.rc @@ -273,12 +288,8 @@ .PHONY: clean clean: - $(RM) core *.o tmp* result* *.gcda dictionary *.zst \ - zstd$(EXT) zstd32$(EXT) zstd-dll$(EXT) \ - zstd-compress$(EXT) zstd-decompress$(EXT) \ - zstd-small$(EXT) zstd-frugal$(EXT) zstd-nolegacy$(EXT) zstd4$(EXT) \ - zstd-dictBuilder$(EXT) \ - *.gcda default*.profraw default.profdata have_zlib$(EXT) + $(RM) $(CLEAN) core *.o tmp* result* dictionary *.zst \ + *.gcda default*.profraw default.profdata have_zlib $(RM) -r obj/* @echo Cleaning completed @@ -339,7 +350,7 @@ HAVE_COLORNEVER = $(shell echo a | egrep --color=never a > /dev/null 2> /dev/null && echo 1 || echo 0) EGREP_OPTIONS ?= -ifeq ($HAVE_COLORNEVER, 1) +ifeq ($(HAVE_COLORNEVER), 1) EGREP_OPTIONS += --color=never endif EGREP = egrep $(EGREP_OPTIONS) diff --git a/sys/contrib/zstd/programs/README.md b/sys/contrib/zstd/programs/README.md --- a/sys/contrib/zstd/programs/README.md +++ b/sys/contrib/zstd/programs/README.md @@ -164,6 +164,7 @@ --filelist FILE : read list of files to operate upon from FILE --output-dir-flat DIR : processed files are stored into DIR --output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure +--[no-]asyncio : use asynchronous IO (default: enabled) --[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled). If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate). 
-- : All arguments after "--" are treated as files @@ -208,7 +209,7 @@ -b# : benchmark file(s), using # compression level (default: 3) -e# : test all compression levels successively from -b# to -e# (default: 1) -i# : minimum evaluation time in seconds (default: 3s) - -B# : cut file into independent blocks of size # (default: no block) + -B# : cut file into independent chunks of size # (default: no chunking) -S : output one benchmark result per input file (default: consolidated result) --priority=rt : set process priority to real-time ``` @@ -275,7 +276,7 @@ The below table illustrates this on the [Silesia compression corpus]. -[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia +[Silesia compression corpus]: https://sun.aei.polsl.pl//~sdeor/index.php?page=silesia | Method | Compression ratio | Compression speed | Decompression speed | |:-------|------------------:|------------------:|---------------------:| diff --git a/sys/contrib/zstd/programs/benchfn.h b/sys/contrib/zstd/programs/benchfn.h --- a/sys/contrib/zstd/programs/benchfn.h +++ b/sys/contrib/zstd/programs/benchfn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -123,7 +123,7 @@ /* when benchmark failed, it means one invocation of `benchFn` failed. * The failure was detected by `errorFn`, operating on return values of `benchFn`. * Returns the faulty return value. - * note : this function will abort() program execution if benchmark did not failed. + * note : this function will abort() program execution if benchmark did not fail. * always check if benchmark failed first ! */ size_t BMK_extract_errorResult(BMK_runOutcome_t outcome); diff --git a/sys/contrib/zstd/programs/benchfn.c b/sys/contrib/zstd/programs/benchfn.c --- a/sys/contrib/zstd/programs/benchfn.c +++ b/sys/contrib/zstd/programs/benchfn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -229,9 +229,9 @@ cont->timeSpent_ns += (unsigned long long)loopDuration_ns; /* estimate nbLoops for next run to last approximately 1 second */ - if (loopDuration_ns > (runBudget_ns / 50)) { + if (loopDuration_ns > ((double)runBudget_ns / 50)) { double const fastestRun_ns = MIN(bestRunTime.nanoSecPerRun, newRunTime.nanoSecPerRun); - cont->nbLoops = (unsigned)(runBudget_ns / fastestRun_ns) + 1; + cont->nbLoops = (unsigned)((double)runBudget_ns / fastestRun_ns) + 1; } else { /* previous run was too short : blindly increase workload by x multiplier */ const unsigned multiplier = 10; @@ -239,7 +239,7 @@ cont->nbLoops *= multiplier; } - if(loopDuration_ns < runTimeMin_ns) { + if(loopDuration_ns < (double)runTimeMin_ns) { /* don't report results for which benchmark run time was too small : increased risks of rounding errors */ assert(completed == 0); continue; diff --git a/sys/contrib/zstd/programs/benchzstd.h b/sys/contrib/zstd/programs/benchzstd.h --- a/sys/contrib/zstd/programs/benchzstd.h +++ b/sys/contrib/zstd/programs/benchzstd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the @@ -81,21 +81,13 @@ * 2 : + result + interaction + warnings; * 3 : + information; * 4 : + debug - * @return: - * a variant, which expresses either an error, or a valid result. - * Use BMK_isSuccessful_benchOutcome() to check if function was successful. - * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel); +int BMK_benchFiles( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int cLevel, const ZSTD_compressionParameters* compressionParams, + int displayLevel); typedef enum { @@ -126,11 +118,11 @@ /*! BMK_benchFilesAdvanced(): * Same as BMK_benchFiles(), * with more controls, provided through advancedParams_t structure */ -BMK_benchOutcome_t BMK_benchFilesAdvanced( - const char* const * fileNamesTable, unsigned nbFiles, - const char* dictFileName, - int cLevel, const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_benchFilesAdvanced( + const char* const * fileNamesTable, unsigned nbFiles, + const char* dictFileName, + int cLevel, const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); /*! BMK_syntheticTest() -- called from zstdcli */ /* Generates a sample with datagen, using compressibility argument */ @@ -139,20 +131,11 @@ * compressionParams - basic compression Parameters * displayLevel - see benchFiles * adv - see advanced_Params_t - * @return: - * a variant, which expresses either an error, or a valid result. - * Use BMK_isSuccessful_benchOutcome() to check if function was successful. - * If yes, extract the valid result with BMK_extract_benchResult(), - * it will contain : - * .cSpeed: compression speed in bytes per second, - * .dSpeed: decompression speed in bytes per second, - * .cSize : compressed size, in bytes - * .cMem : memory budget required for the compression context + * @return: 0 on success, !0 on error */ -BMK_benchOutcome_t BMK_syntheticTest( - int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv); +int BMK_syntheticTest(int cLevel, double compressibility, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv); @@ -190,8 +173,8 @@ int displayLevel, const char* displayName); -/* BMK_benchMemAdvanced() : same as BMK_benchMem() - * with following additional options : +/* BMK_benchMemAdvanced() : used by Paramgrill + * same as BMK_benchMem() with following additional options : * dstBuffer - destination buffer to write compressed output in, NULL if none provided. * dstCapacity - capacity of destination buffer, give 0 if dstBuffer = NULL * adv = see advancedParams_t diff --git a/sys/contrib/zstd/programs/benchzstd.c b/sys/contrib/zstd/programs/benchzstd.c --- a/sys/contrib/zstd/programs/benchzstd.c +++ b/sys/contrib/zstd/programs/benchzstd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. 
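
For illustration (not part of the upstream patch): the benchzstd.h hunk above changes BMK_benchFiles(), BMK_benchFilesAdvanced() and BMK_syntheticTest() to return a plain int (0 on success, non-zero on error) instead of a BMK_benchOutcome_t variant; per the header's documentation, results are printed by the functions themselves at displayLevel 2 and above. A small sketch of the updated calling convention; treating a zero-initialized ZSTD_compressionParameters as "no manual overrides" is an assumption based on how the CLI drives this entry point, and the file names are placeholders:

    #include <string.h>     /* memset */
    #include "benchzstd.h"

    /* Benchmark two files at level 3, no dictionary. */
    static int bench_two_files(void)
    {
        const char* files[] = { "fileA", "fileB" };   /* hypothetical inputs */
        ZSTD_compressionParameters params;
        memset(&params, 0, sizeof(params));           /* keep the level's defaults */
        return BMK_benchFiles(files, 2, NULL /* no dictionary */,
                              3 /* cLevel */, &params, 2 /* displayLevel */);
    }
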
+ * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -13,7 +13,7 @@ * Tuning parameters ****************************************/ #ifndef BMK_TIMETEST_DEFAULT_S /* default minimum time per test */ -#define BMK_TIMETEST_DEFAULT_S 3 +# define BMK_TIMETEST_DEFAULT_S 3 #endif @@ -327,26 +327,31 @@ /* init */ memset(&benchResult, 0, sizeof(benchResult)); if (strlen(displayName)>17) displayName += strlen(displayName) - 17; /* display last 17 characters */ - if (adv->mode == BMK_decodeOnly) { /* benchmark only decompression : source must be already compressed */ + if (adv->mode == BMK_decodeOnly) { + /* benchmark only decompression : source must be already compressed */ const char* srcPtr = (const char*)srcBuffer; U64 totalDSize64 = 0; U32 fileNb; for (fileNb=0; fileNb decodedSize) { /* size_t overflow */ + RETURN_ERROR(32, BMK_benchOutcome_t, "decompressed size is too large for local system"); + } *resultBufferPtr = malloc(decodedSize); if (!(*resultBufferPtr)) { - RETURN_ERROR(33, BMK_benchOutcome_t, "not enough memory"); - } - if (totalDSize64 > decodedSize) { /* size_t overflow */ - free(*resultBufferPtr); - RETURN_ERROR(32, BMK_benchOutcome_t, "original size is too large"); + RETURN_ERROR(33, BMK_benchOutcome_t, "allocation error: not enough memory"); } cSize = srcSize; srcSize = decodedSize; @@ -387,6 +392,10 @@ RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1); } + if (!UTIL_support_MT_measurements() && adv->nbWorkers > 1) { + OUTPUTLEVEL(2, "Warning : time measurements may be incorrect in multithreading mode... \n") + } + /* Bench */ { U64 const crcOrig = (adv->mode == BMK_decodeOnly) ? 0 : XXH64(srcBuffer, srcSize, 0); # define NB_MARKS 4 @@ -442,7 +451,7 @@ BMK_runOutcome_t const cOutcome = BMK_benchTimedFn( timeStateCompress, cbp); if (!BMK_isSuccessful_runOutcome(cOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "compression error"); } { BMK_runTime_t const cResult = BMK_extract_runTime(cOutcome); @@ -470,7 +479,7 @@ BMK_runOutcome_t const dOutcome = BMK_benchTimedFn(timeStateDecompress, dbp); if(!BMK_isSuccessful_runOutcome(dOutcome)) { - return BMK_benchOutcome_error(); + RETURN_ERROR(30, BMK_benchOutcome_t, "decompression error"); } { BMK_runTime_t const dResult = BMK_extract_runTime(dOutcome); @@ -594,7 +603,7 @@ void* resultBuffer = srcSize ? 
malloc(srcSize) : NULL; - int allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || + int const allocationincomplete = !srcPtrs || !srcSizes || !cPtrs || !cSizes || !cCapacities || !resPtrs || !resSizes || !timeStateCompress || !timeStateDecompress || !cctx || !dctx || @@ -688,9 +697,9 @@ displayLevel, displayName, adv); } -BMK_benchOutcome_t BMK_syntheticTest(int cLevel, double compressibility, - const ZSTD_compressionParameters* compressionParams, - int displayLevel, const BMK_advancedParams_t* adv) +int BMK_syntheticTest(int cLevel, double compressibility, + const ZSTD_compressionParameters* compressionParams, + int displayLevel, const BMK_advancedParams_t* adv) { char name[20] = {0}; size_t const benchedSize = 10000000; @@ -698,12 +707,16 @@ BMK_benchOutcome_t res; if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); + DISPLAYLEVEL(1, "Invalid Compression Level"); + return 15; } /* Memory allocation */ srcBuffer = malloc(benchedSize); - if (!srcBuffer) RETURN_ERROR(21, BMK_benchOutcome_t, "not enough memory"); + if (!srcBuffer) { + DISPLAYLEVEL(1, "allocation error : not enough memory"); + return 16; + } /* Fill input buffer */ RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0); @@ -719,7 +732,7 @@ /* clean up */ free(srcBuffer); - return res; + return !BMK_isSuccessful_benchOutcome(res); } @@ -781,7 +794,7 @@ return 0; } -BMK_benchOutcome_t BMK_benchFilesAdvanced( +int BMK_benchFilesAdvanced( const char* const * fileNamesTable, unsigned nbFiles, const char* dictFileName, int cLevel, const ZSTD_compressionParameters* compressionParams, @@ -796,19 +809,25 @@ U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles); if (!nbFiles) { - RETURN_ERROR(14, BMK_benchOutcome_t, "No Files to Benchmark"); + DISPLAYLEVEL(1, "No Files to Benchmark"); + return 13; } if (cLevel > ZSTD_maxCLevel()) { - RETURN_ERROR(15, BMK_benchOutcome_t, "Invalid Compression Level"); + DISPLAYLEVEL(1, "Invalid Compression Level"); + return 14; } if (totalSizeToLoad == UTIL_FILESIZE_UNKNOWN) { - RETURN_ERROR(9, BMK_benchOutcome_t, "Error loading files"); + DISPLAYLEVEL(1, "Error loading files"); + return 15; } fileSizes = (size_t*)calloc(nbFiles, sizeof(size_t)); - if (!fileSizes) RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory for fileSizes"); + if (!fileSizes) { + DISPLAYLEVEL(1, "not enough memory for fileSizes"); + return 16; + } /* Load dictionary */ if (dictFileName != NULL) { @@ -816,18 +835,21 @@ if (dictFileSize == UTIL_FILESIZE_UNKNOWN) { DISPLAYLEVEL(1, "error loading %s : %s \n", dictFileName, strerror(errno)); free(fileSizes); - RETURN_ERROR(9, BMK_benchOutcome_t, "benchmark aborted"); + DISPLAYLEVEL(1, "benchmark aborted"); + return 17; } if (dictFileSize > 64 MB) { free(fileSizes); - RETURN_ERROR(10, BMK_benchOutcome_t, "dictionary file %s too large", dictFileName); + DISPLAYLEVEL(1, "dictionary file %s too large", dictFileName); + return 18; } dictBufferSize = (size_t)dictFileSize; dictBuffer = malloc(dictBufferSize); if (dictBuffer==NULL) { free(fileSizes); - RETURN_ERROR(11, BMK_benchOutcome_t, "not enough memory for dictionary (%u bytes)", + DISPLAYLEVEL(1, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize); + return 19; } { int const errorCode = BMK_loadFiles(dictBuffer, dictBufferSize, @@ -849,7 +871,8 @@ if (!srcBuffer) { free(dictBuffer); free(fileSizes); - RETURN_ERROR(12, BMK_benchOutcome_t, "not enough memory"); + DISPLAYLEVEL(1, "not enough memory for srcBuffer"); + return 20; } /* 
Load input buffer */ @@ -877,12 +900,11 @@ free(srcBuffer); free(dictBuffer); free(fileSizes); - return res; + return !BMK_isSuccessful_benchOutcome(res); } -BMK_benchOutcome_t BMK_benchFiles( - const char* const * fileNamesTable, unsigned nbFiles, +int BMK_benchFiles(const char* const * fileNamesTable, unsigned nbFiles, const char* dictFileName, int cLevel, const ZSTD_compressionParameters* compressionParams, int displayLevel) diff --git a/sys/contrib/zstd/programs/datagen.h b/sys/contrib/zstd/programs/datagen.h --- a/sys/contrib/zstd/programs/datagen.h +++ b/sys/contrib/zstd/programs/datagen.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/programs/datagen.c b/sys/contrib/zstd/programs/datagen.c --- a/sys/contrib/zstd/programs/datagen.c +++ b/sys/contrib/zstd/programs/datagen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/programs/dibio.h b/sys/contrib/zstd/programs/dibio.h --- a/sys/contrib/zstd/programs/dibio.h +++ b/sys/contrib/zstd/programs/dibio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/programs/dibio.c b/sys/contrib/zstd/programs/dibio.c --- a/sys/contrib/zstd/programs/dibio.c +++ b/sys/contrib/zstd/programs/dibio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -27,10 +27,11 @@ #include /* memset */ #include /* fprintf, fopen, ftello64 */ #include /* errno */ -#include #include "timefn.h" /* UTIL_time_t, UTIL_clockSpanMicro, UTIL_getTime */ +#include "../lib/common/debug.h" /* assert */ #include "../lib/common/mem.h" /* read */ +#include "../lib/zstd_errors.h" #include "dibio.h" @@ -127,8 +128,11 @@ while ( nbSamplesLoaded < sstSize && fileIndex < nbFiles ) { size_t fileDataLoaded; S64 const fileSize = DiB_getFileSize(fileNamesTable[fileIndex]); - if (fileSize <= 0) /* skip if zero-size or file error */ + if (fileSize <= 0) { + /* skip if zero-size or file error */ + ++fileIndex; continue; + } f = fopen( fileNamesTable[fileIndex], "rb"); if (f == NULL) @@ -193,7 +197,8 @@ static void DiB_shuffle(const char** fileNamesTable, unsigned nbFiles) { U32 seed = 0xFD2FB528; unsigned i; - assert(nbFiles >= 1); + if (nbFiles == 0) + return; for (i = nbFiles - 1; i > 0; --i) { unsigned const j = DiB_rand(&seed) % (i + 1); const char* const tmp = fileNamesTable[j]; @@ -269,21 +274,20 @@ int n; memset(&fs, 0, sizeof(fs)); - // We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX + /* We assume that if chunking is requested, the chunk size is < SAMPLESIZE_MAX */ assert( chunkSize <= SAMPLESIZE_MAX ); for (n=0; n 0) - { - // TODO: is there a minimum sample size? Can we have a 1-byte sample? + if (chunkSize > 0) { + /* TODO: is there a minimum sample size? Can we have a 1-byte sample? 
*/ fs.nbSamples += (int)((fileSize + chunkSize-1) / chunkSize); fs.totalSizeToLoad += fileSize; } @@ -350,7 +354,7 @@ } /* Checks */ - if ((!sampleSizes) || (!srcBuffer) || (!dictBuffer)) + if ((fs.nbSamples && !sampleSizes) || (!srcBuffer) || (!dictBuffer)) EXM_THROW(12, "not enough memory for DiB_trainFiles"); /* should not happen */ if (fs.oneSampleTooLarge) { DISPLAYLEVEL(2, "! Warning : some sample(s) are very large \n"); @@ -379,7 +383,7 @@ srcBuffer, &loadedSize, sampleSizes, fs.nbSamples, fileNamesTable, nbFiles, chunkSize, displayLevel); - { size_t dictSize; + { size_t dictSize = ZSTD_error_GENERIC; if (params) { DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH); /* guard band, for end of buffer condition */ dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, maxDictSize, @@ -399,8 +403,7 @@ dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, *coverParams); } - } else { - assert(fastCoverParams != NULL); + } else if (fastCoverParams != NULL) { if (optimize) { dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, @@ -415,6 +418,8 @@ dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, maxDictSize, srcBuffer, sampleSizes, nbSamplesLoaded, *fastCoverParams); } + } else { + assert(0 /* Impossible */); } if (ZDICT_isError(dictSize)) { DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize)); /* should not happen */ diff --git a/sys/contrib/zstd/programs/fileio.h b/sys/contrib/zstd/programs/fileio.h --- a/sys/contrib/zstd/programs/fileio.h +++ b/sys/contrib/zstd/programs/fileio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
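
For context (not part of the upstream patch): the dibio.c hunk above tightens the dispatch between the legacy, cover and fastCover trainers and gives dictSize a defined initial value so a missed branch cannot be mistaken for success. All of these paths share the same contract as the basic one-shot trainer, sketched below for illustration only; it mirrors the ZDICT_isError() check in the hunk:

    #include <stdio.h>
    #include "../lib/zdict.h"   /* ZDICT_trainFromBuffer, ZDICT_isError */

    /* samplesBuffer holds nbSamples concatenated samples whose sizes are
     * listed in samplesSizes, the layout DiB_loadFiles produces. */
    static size_t train_dict(void* dictBuffer, size_t dictCapacity,
                             const void* samplesBuffer,
                             const size_t* samplesSizes, unsigned nbSamples)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                      samplesBuffer, samplesSizes, nbSamples);
        if (ZDICT_isError(dictSize))
            fprintf(stderr, "dictionary training failed : %s\n", ZDICT_getErrorName(dictSize));
        return dictSize;
    }
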
* * This source code is licensed under both the BSD-style license (found in the @@ -12,6 +12,8 @@ #ifndef FILEIO_H_23981798732 #define FILEIO_H_23981798732 +#include "fileio_types.h" +#include "util.h" /* FileNamesTable */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ #include "../lib/zstd.h" /* ZSTD_* */ @@ -53,10 +55,6 @@ /*-************************************* * Types ***************************************/ -typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t; - -typedef struct FIO_prefs_s FIO_prefs_t; - FIO_prefs_t* FIO_createPreferences(void); void FIO_freePreferences(FIO_prefs_t* const prefs); @@ -66,9 +64,6 @@ FIO_ctx_t* FIO_createContext(void); void FIO_freeContext(FIO_ctx_t* const fCtx); -typedef struct FIO_display_prefs_s FIO_display_prefs_t; - -typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e; /*-************************************* * Parameters @@ -76,7 +71,7 @@ /* FIO_prefs_t functions */ void FIO_setCompressionType(FIO_prefs_t* const prefs, FIO_compressionType_t compressionType); void FIO_overwriteMode(FIO_prefs_t* const prefs); -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt); +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt); void FIO_setAdaptMin(FIO_prefs_t* const prefs, int minCLevel); void FIO_setAdaptMax(FIO_prefs_t* const prefs, int maxCLevel); void FIO_setUseRowMatchFinder(FIO_prefs_t* const prefs, int useRowMatchFinder); @@ -91,8 +86,8 @@ void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit); void FIO_setNbWorkers(FIO_prefs_t* const prefs, int nbWorkers); void FIO_setOverlapLog(FIO_prefs_t* const prefs, int overlapLog); -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag); -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag); +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse); /**< 0: no sparse; 1: disable on stdout; 2: always enabled */ void FIO_setRsyncable(FIO_prefs_t* const prefs, int rsyncable); void FIO_setStreamSrcSize(FIO_prefs_t* const prefs, size_t streamSrcSize); void FIO_setTargetCBlockSize(FIO_prefs_t* const prefs, size_t targetCBlockSize); @@ -109,6 +104,9 @@ void FIO_setPatchFromMode(FIO_prefs_t* const prefs, int value); void FIO_setContentSize(FIO_prefs_t* const prefs, int value); void FIO_displayCompressionParameters(const FIO_prefs_t* prefs); +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value); +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value); +void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_paramSwitch_e value); /* FIO_ctx_t functions */ void FIO_setNbFilesTotal(FIO_ctx_t* const fCtx, int value); @@ -171,6 +169,9 @@ /* custom crash signal handler */ void FIO_addAbortHandler(void); +char const* FIO_zlibVersion(void); +char const* FIO_lz4Version(void); +char const* FIO_lzmaVersion(void); #if defined (__cplusplus) diff --git a/sys/contrib/zstd/programs/fileio.c b/sys/contrib/zstd/programs/fileio.c --- a/sys/contrib/zstd/programs/fileio.c +++ b/sys/contrib/zstd/programs/fileio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
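
For illustration (not part of the upstream patch): the fileio.h additions above expose the new I/O behaviors as FIO_prefs_t setters: FIO_setAsyncIOFlag() for the --[no-]asyncio switch, FIO_setPassThroughFlag() (presumably controlling the CLI's pass-through behavior), and FIO_setMMapDict() to choose between malloc'd and memory-mapped dictionaries. A minimal sketch of a front-end driving them, assuming the usual create/free pairing declared in this header:

    #include "fileio.h"

    /* Build a preferences object with the new options.
     * With ZSTD_ps_auto, fileio decides on its own when to mmap a
     * dictionary (e.g. a patch-from dictionary larger than the memory
     * limit, per the FIO_createCResources logic later in this file). */
    static FIO_prefs_t* make_prefs(void)
    {
        FIO_prefs_t* const prefs = FIO_createPreferences();
        FIO_setAsyncIOFlag(prefs, 1);           /* enable asynchronous I/O */
        FIO_setPassThroughFlag(prefs, 0);       /* explicitly disable pass-through */
        FIO_setMMapDict(prefs, ZSTD_ps_auto);   /* let fileio pick malloc vs mmap */
        return prefs;                           /* release with FIO_freePreferences() */
    }
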
* * This source code is licensed under both the BSD-style license (found in the @@ -28,6 +28,7 @@ #include /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */ #include /* malloc, free */ #include /* strcmp, strlen */ +#include /* clock_t, to measure process time */ #include /* O_WRONLY */ #include #include /* errno */ @@ -40,8 +41,12 @@ # include #endif -#include "../lib/common/mem.h" /* U32, U64 */ #include "fileio.h" +#include "fileio_asyncio.h" +#include "fileio_common.h" + +FIO_display_prefs_t g_display_prefs = {2, FIO_ps_auto}; +UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_magicNumber, ZSTD_frameHeaderSize_max */ #include "../lib/zstd.h" @@ -65,6 +70,40 @@ # include #endif +char const* FIO_zlibVersion(void) +{ +#if defined(ZSTD_GZCOMPRESS) || defined(ZSTD_GZDECOMPRESS) + return zlibVersion(); +#else + return "Unsupported"; +#endif +} + +char const* FIO_lz4Version(void) +{ +#if defined(ZSTD_LZ4COMPRESS) || defined(ZSTD_LZ4DECOMPRESS) + /* LZ4_versionString() added in v1.7.3 */ +# if LZ4_VERSION_NUMBER >= 10703 + return LZ4_versionString(); +# else +# define ZSTD_LZ4_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +# define ZSTD_LZ4_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LZ4_VERSION) + return ZSTD_LZ4_VERSION_STRING; +# endif +#else + return "Unsupported"; +#endif +} + +char const* FIO_lzmaVersion(void) +{ +#if defined(ZSTD_LZMACOMPRESS) || defined(ZSTD_LZMADECOMPRESS) + return lzma_version_string(); +#else + return "Unsupported"; +#endif +} + /*-************************************* * Constants @@ -75,69 +114,17 @@ #define FNSPACE 30 /* Default file permissions 0666 (modulated by umask) */ +/* Temporary restricted file permissions are used when we're going to + * chmod/chown at the end of the operation. */ #if !defined(_WIN32) /* These macros aren't defined on windows. */ #define DEFAULT_FILE_PERMISSIONS (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) +#define TEMPORARY_FILE_PERMISSIONS (S_IRUSR|S_IWUSR) #else #define DEFAULT_FILE_PERMISSIONS (0666) +#define TEMPORARY_FILE_PERMISSIONS (0600) #endif -/*-************************************* -* Macros -***************************************/ -#define KB *(1 <<10) -#define MB *(1 <<20) -#define GB *(1U<<30) -#undef MAX -#define MAX(a,b) ((a)>(b) ? (a) : (b)) - -struct FIO_display_prefs_s { - int displayLevel; /* 0 : no display; 1: errors; 2: + result + interaction + warnings; 3: + progression; 4: + information */ - FIO_progressSetting_e progressSetting; -}; - -static FIO_display_prefs_t g_display_prefs = {2, FIO_ps_auto}; - -#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) -#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) -#define DISPLAYLEVEL(l, ...) { if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } - -static const U64 g_refreshRate = SEC_TO_MICRO / 6; -static UTIL_time_t g_displayClock = UTIL_TIME_INITIALIZER; - -#define READY_FOR_UPDATE() ((g_display_prefs.progressSetting != FIO_ps_never) && UTIL_clockSpanMicro(g_displayClock) > g_refreshRate) -#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } -#define DISPLAYUPDATE(l, ...) { \ - if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \ - if (READY_FOR_UPDATE() || (g_display_prefs.displayLevel>=4)) { \ - DELAY_NEXT_UPDATE(); \ - DISPLAY(__VA_ARGS__); \ - if (g_display_prefs.displayLevel>=4) fflush(stderr); \ - } } } - -#undef MIN /* in case it would be already defined */ -#define MIN(a,b) ((a) < (b) ? 
(a) : (b)) - - -#define EXM_THROW(error, ...) \ -{ \ - DISPLAYLEVEL(1, "zstd: "); \ - DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \ - DISPLAYLEVEL(1, "error %i : ", error); \ - DISPLAYLEVEL(1, __VA_ARGS__); \ - DISPLAYLEVEL(1, " \n"); \ - exit(error); \ -} - -#define CHECK_V(v, f) \ - v = f; \ - if (ZSTD_isError(v)) { \ - DISPLAYLEVEL(5, "%s \n", #f); \ - EXM_THROW(11, "%s", ZSTD_getErrorName(v)); \ - } -#define CHECK(f) { size_t err; CHECK_V(err, f); } - - /*-************************************ * Signal (Ctrl-C trapping) **************************************/ @@ -237,7 +224,7 @@ } #endif -void FIO_addAbortHandler() +void FIO_addAbortHandler(void) { #if BACKTRACE_ENABLE signal(SIGABRT, ABRThandler); @@ -248,94 +235,6 @@ #endif } - -/*-************************************************************ -* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW -***************************************************************/ -#if defined(_MSC_VER) && _MSC_VER >= 1400 -# define LONG_SEEK _fseeki64 -# define LONG_TELL _ftelli64 -#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ -# define LONG_SEEK fseeko -# define LONG_TELL ftello -#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) -# define LONG_SEEK fseeko64 -# define LONG_TELL ftello64 -#elif defined(_WIN32) && !defined(__DJGPP__) -# include - static int LONG_SEEK(FILE* file, __int64 offset, int origin) { - LARGE_INTEGER off; - DWORD method; - off.QuadPart = offset; - if (origin == SEEK_END) - method = FILE_END; - else if (origin == SEEK_CUR) - method = FILE_CURRENT; - else - method = FILE_BEGIN; - - if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) - return 0; - else - return -1; - } - static __int64 LONG_TELL(FILE* file) { - LARGE_INTEGER off, newOff; - off.QuadPart = 0; - newOff.QuadPart = 0; - SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT); - return newOff.QuadPart; - } -#else -# define LONG_SEEK fseek -# define LONG_TELL ftell -#endif - - -/*-************************************* -* Parameters: FIO_prefs_t -***************************************/ - -/* typedef'd to FIO_prefs_t within fileio.h */ -struct FIO_prefs_s { - - /* Algorithm preferences */ - FIO_compressionType_t compressionType; - U32 sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ - int dictIDFlag; - int checksumFlag; - int blockSize; - int overlapLog; - U32 adaptiveMode; - U32 useRowMatchFinder; - int rsyncable; - int minAdaptLevel; - int maxAdaptLevel; - int ldmFlag; - int ldmHashLog; - int ldmMinMatch; - int ldmBucketSizeLog; - int ldmHashRateLog; - size_t streamSrcSize; - size_t targetCBlockSize; - int srcSizeHint; - int testMode; - ZSTD_paramSwitch_e literalCompressionMode; - - /* IO preferences */ - U32 removeSrcFile; - U32 overwrite; - - /* Computation resources preferences */ - unsigned memLimit; - int nbWorkers; - - int excludeCompressedFiles; - int patchFromMode; - int contentSize; - int allowBlockDevices; -}; - /*-************************************* * Parameters: FIO_ctx_t ***************************************/ @@ -355,6 +254,18 @@ size_t totalBytesOutput; }; +static int FIO_shouldDisplayFileSummary(FIO_ctx_t const* fCtx) +{ + return fCtx->nbFilesTotal <= 1 || g_display_prefs.displayLevel >= 3; +} + +static int FIO_shouldDisplayMultipleFileSummary(FIO_ctx_t const* fCtx) +{ + int const shouldDisplay = 
(fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1); + assert(shouldDisplay || FIO_shouldDisplayFileSummary(fCtx) || fCtx->nbFilesProcessed == 0); + return shouldDisplay; +} + /*-************************************* * Parameters: Initialization @@ -395,6 +306,8 @@ ret->literalCompressionMode = ZSTD_ps_auto; ret->excludeCompressedFiles = 0; ret->allowBlockDevices = 0; + ret->asyncIO = AIO_supported(); + ret->passThrough = -1; return ret; } @@ -443,13 +356,13 @@ void FIO_overwriteMode(FIO_prefs_t* const prefs) { prefs->overwrite = 1; } -void FIO_setSparseWrite(FIO_prefs_t* const prefs, unsigned sparse) { prefs->sparseFileSupport = sparse; } +void FIO_setSparseWrite(FIO_prefs_t* const prefs, int sparse) { prefs->sparseFileSupport = sparse; } void FIO_setDictIDFlag(FIO_prefs_t* const prefs, int dictIDFlag) { prefs->dictIDFlag = dictIDFlag; } void FIO_setChecksumFlag(FIO_prefs_t* const prefs, int checksumFlag) { prefs->checksumFlag = checksumFlag; } -void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, unsigned flag) { prefs->removeSrcFile = (flag>0); } +void FIO_setRemoveSrcFile(FIO_prefs_t* const prefs, int flag) { prefs->removeSrcFile = (flag!=0); } void FIO_setMemLimit(FIO_prefs_t* const prefs, unsigned memLimit) { prefs->memLimit = memLimit; } @@ -476,7 +389,7 @@ prefs->overlapLog = overlapLog; } -void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, unsigned adapt) { +void FIO_setAdaptiveMode(FIO_prefs_t* const prefs, int adapt) { if ((adapt>0) && (prefs->nbWorkers==0)) EXM_THROW(1, "Adaptive mode is not compatible with single thread mode \n"); prefs->adaptiveMode = adapt; @@ -558,6 +471,25 @@ prefs->contentSize = value != 0; } +void FIO_setAsyncIOFlag(FIO_prefs_t* const prefs, int value) { +#ifdef ZSTD_MULTITHREAD + prefs->asyncIO = value; +#else + (void) prefs; + (void) value; + DISPLAYLEVEL(2, "Note : asyncio is disabled (lack of multithreading support) \n"); +#endif +} + +void FIO_setPassThroughFlag(FIO_prefs_t* const prefs, int value) { + prefs->passThrough = (value != 0); +} + +void FIO_setMMapDict(FIO_prefs_t* const prefs, ZSTD_paramSwitch_e value) +{ + prefs->mmapDict = value; +} + /* FIO_ctx_t functions */ void FIO_setHasStdoutOutput(FIO_ctx_t* const fCtx, int value) { @@ -608,26 +540,26 @@ /** FIO_openSrcFile() : * condition : `srcFileName` must be non-NULL. `prefs` may be NULL. * @result : FILE* to `srcFileName`, or NULL if it fails */ -static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName) +static FILE* FIO_openSrcFile(const FIO_prefs_t* const prefs, const char* srcFileName, stat_t* statbuf) { - stat_t statbuf; int allowBlockDevices = prefs != NULL ? 
prefs->allowBlockDevices : 0; assert(srcFileName != NULL); + assert(statbuf != NULL); if (!strcmp (srcFileName, stdinmark)) { DISPLAYLEVEL(4,"Using stdin for input \n"); SET_BINARY_MODE(stdin); return stdin; } - if (!UTIL_stat(srcFileName, &statbuf)) { + if (!UTIL_stat(srcFileName, statbuf)) { DISPLAYLEVEL(1, "zstd: can't stat %s : %s -- ignored \n", srcFileName, strerror(errno)); return NULL; } - if (!UTIL_isRegularFileStat(&statbuf) - && !UTIL_isFIFOStat(&statbuf) - && !(allowBlockDevices && UTIL_isBlockDevStat(&statbuf)) + if (!UTIL_isRegularFileStat(statbuf) + && !UTIL_isFIFOStat(statbuf) + && !(allowBlockDevices && UTIL_isBlockDevStat(statbuf)) ) { DISPLAYLEVEL(1, "zstd: %s is not a regular file -- ignored \n", srcFileName); @@ -649,6 +581,8 @@ const char* srcFileName, const char* dstFileName, const int mode) { + int isDstRegFile; + if (prefs->testMode) return NULL; /* do not open file in test mode */ assert(dstFileName != NULL); @@ -668,11 +602,16 @@ return NULL; } + isDstRegFile = UTIL_isRegularFile(dstFileName); /* invoke once */ if (prefs->sparseFileSupport == 1) { prefs->sparseFileSupport = ZSTD_SPARSE_DEFAULT; + if (!isDstRegFile) { + prefs->sparseFileSupport = 0; + DISPLAYLEVEL(4, "Sparse File Support is disabled when output is not a file \n"); + } } - if (UTIL_isRegularFile(dstFileName)) { + if (isDstRegFile) { /* Check if destination file already exists */ #if !defined(_WIN32) /* this test does not work on Windows : @@ -685,7 +624,7 @@ if (!prefs->overwrite) { if (g_display_prefs.displayLevel <= 1) { /* No interaction possible */ - DISPLAY("zstd: %s already exists; not overwritten \n", + DISPLAYLEVEL(1, "zstd: %s already exists; not overwritten \n", dstFileName); return NULL; } @@ -717,44 +656,70 @@ #endif if (f == NULL) { DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno)); + } else { + /* An increased buffer size can provide a significant performance + * boost on some platforms. Note that providing a NULL buf with a + * size that's not 0 is not defined in ANSI C, but is defined in an + * extension. There are three possibilities here: + * 1. Libc supports the extended version and everything is good. + * 2. Libc ignores the size when buf is NULL, in which case + * everything will continue as if we didn't call `setvbuf()`. + * 3. We fail the call and execution continues but a warning + * message might be shown. + * In all cases due execution continues. For now, I believe that + * this is a more cost-effective solution than managing the buffers + * allocations ourselves (will require an API change). + */ + if (setvbuf(f, NULL, _IOFBF, 1 MB)) { + DISPLAYLEVEL(2, "Warning: setvbuf failed for %s\n", dstFileName); + } } return f; } } -/*! FIO_createDictBuffer() : - * creates a buffer, pointed by `*bufferPtr`, + +/* FIO_getDictFileStat() : + */ +static void FIO_getDictFileStat(const char* fileName, stat_t* dictFileStat) { + assert(dictFileStat != NULL); + if (fileName == NULL) return; + + if (!UTIL_stat(fileName, dictFileStat)) { + EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); + } + + if (!UTIL_isRegularFileStat(dictFileStat)) { + EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); + } +} + +/* FIO_setDictBufferMalloc() : + * allocates a buffer, pointed by `dict->dictBuffer`, * loads `filename` content into it, up to DICTSIZE_MAX bytes. 
* @return : loaded size * if fileName==NULL, returns 0 and a NULL pointer */ -static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName, FIO_prefs_t* const prefs) +static size_t FIO_setDictBufferMalloc(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) { FILE* fileHandle; U64 fileSize; - stat_t statbuf; + void** bufferPtr = &dict->dictBuffer; assert(bufferPtr != NULL); + assert(dictFileStat != NULL); *bufferPtr = NULL; if (fileName == NULL) return 0; DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); - if (!UTIL_stat(fileName, &statbuf)) { - EXM_THROW(31, "Stat failed on dictionary file %s: %s", fileName, strerror(errno)); - } - - if (!UTIL_isRegularFileStat(&statbuf)) { - EXM_THROW(32, "Dictionary %s must be a regular file.", fileName); - } - fileHandle = fopen(fileName, "rb"); if (fileHandle == NULL) { EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); } - fileSize = UTIL_getFileSizeStat(&statbuf); + fileSize = UTIL_getFileSizeStat(dictFileStat); { size_t const dictSizeMax = prefs->patchFromMode ? prefs->memLimit : DICTSIZE_MAX; if (fileSize > dictSizeMax) { @@ -774,6 +739,130 @@ return (size_t)fileSize; } +#if (PLATFORM_POSIX_VERSION > 0) +#include +static void FIO_munmap(FIO_Dict_t* dict) +{ + munmap(dict->dictBuffer, dict->dictBufferSize); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + int fileHandle; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = open(fileName, O_RDONLY); + + if (fileHandle == -1) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ? prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + *bufferPtr = mmap(NULL, (size_t)fileSize, PROT_READ, MAP_PRIVATE, fileHandle, 0); + if (*bufferPtr==NULL) EXM_THROW(34, "%s", strerror(errno)); + + close(fileHandle); + return (size_t)fileSize; +} +#elif defined(_MSC_VER) || defined(_WIN32) +#include +static void FIO_munmap(FIO_Dict_t* dict) +{ + UnmapViewOfFile(dict->dictBuffer); + CloseHandle(dict->dictHandle); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + HANDLE fileHandle, mapping; + U64 fileSize; + void** bufferPtr = &dict->dictBuffer; + + assert(bufferPtr != NULL); + assert(dictFileStat != NULL); + *bufferPtr = NULL; + if (fileName == NULL) return 0; + + DISPLAYLEVEL(4,"Loading %s as dictionary \n", fileName); + + fileHandle = CreateFileA(fileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_READONLY, NULL); + + if (fileHandle == INVALID_HANDLE_VALUE) { + EXM_THROW(33, "Couldn't open dictionary %s: %s", fileName, strerror(errno)); + } + + fileSize = UTIL_getFileSizeStat(dictFileStat); + { + size_t const dictSizeMax = prefs->patchFromMode ? 
prefs->memLimit : DICTSIZE_MAX; + if (fileSize > dictSizeMax) { + EXM_THROW(34, "Dictionary file %s is too large (> %u bytes)", + fileName, (unsigned)dictSizeMax); /* avoid extreme cases */ + } + } + + mapping = CreateFileMapping(fileHandle, NULL, PAGE_READONLY, 0, 0, NULL); + if (mapping == NULL) { + EXM_THROW(35, "Couldn't map dictionary %s: %s", fileName, strerror(errno)); + } + + *bufferPtr = MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, (DWORD)fileSize); /* we can only cast to DWORD here because dictSize <= 2GB */ + if (*bufferPtr==NULL) EXM_THROW(36, "%s", strerror(errno)); + + dict->dictHandle = fileHandle; + return (size_t)fileSize; +} +#else +static size_t FIO_setDictBufferMMap(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat) +{ + return FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); +} +static void FIO_munmap(FIO_Dict_t* dict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; +} +#endif + +static void FIO_freeDict(FIO_Dict_t* dict) { + if (dict->dictBufferType == FIO_mallocDict) { + free(dict->dictBuffer); + dict->dictBuffer = NULL; + dict->dictBufferSize = 0; + } else if (dict->dictBufferType == FIO_mmapDict) { + FIO_munmap(dict); + } else { + assert(0); /* Should not reach this case */ + } +} + +static void FIO_initDict(FIO_Dict_t* dict, const char* fileName, FIO_prefs_t* const prefs, stat_t* dictFileStat, FIO_dictBufferType_t dictBufferType) { + dict->dictBufferType = dictBufferType; + if (dict->dictBufferType == FIO_mallocDict) { + dict->dictBufferSize = FIO_setDictBufferMalloc(dict, fileName, prefs, dictFileStat); + } else if (dict->dictBufferType == FIO_mmapDict) { + dict->dictBufferSize = FIO_setDictBufferMMap(dict, fileName, prefs, dictFileStat); + } else { + assert(0); /* Should not reach this case */ + } +} /* FIO_checkFilenameCollisions() : @@ -785,7 +874,7 @@ filenameTableSorted = (const char**) malloc(sizeof(char*) * nbFiles); if (!filenameTableSorted) { - DISPLAY("Unable to malloc new str array, not checking for name collisions\n"); + DISPLAYLEVEL(1, "Allocation error during filename collision checking \n"); return 1; } @@ -802,7 +891,7 @@ prevElem = filenameTableSorted[0]; for (u = 1; u < nbFiles; ++u) { if (strcmp(prevElem, filenameTableSorted[u]) == 0) { - DISPLAY("WARNING: Two files have same filename: %s\n", prevElem); + DISPLAYLEVEL(2, "WARNING: Two files have same filename: %s\n", prevElem); } prevElem = filenameTableSorted[u]; } @@ -885,45 +974,89 @@ FIO_setMemLimit(prefs, (unsigned)maxSize); } -/* FIO_removeMultiFilesWarning() : +/* FIO_multiFilesConcatWarning() : + * This function handles logic when processing multiple files with -o or -c, displaying the appropriate warnings/prompts. * Returns 1 if the console should abort, 0 if console should proceed. - * This function handles logic when processing multiple files with -o, displaying the appropriate warnings/prompts. * - * If -f is specified, or there is just 1 file, zstd will always proceed as usual. - * If --rm is specified, there will be a prompt asking for user confirmation. - * If -f is specified with --rm, zstd will proceed as usual - * If -q is specified with --rm, zstd will abort pre-emptively - * If neither flag is specified, zstd will prompt the user for confirmation to proceed. - * If --rm is not specified, then zstd will print a warning to the user (which can be silenced with -q). - * However, if the output is stdout, we will always abort rather than displaying the warning prompt. 
+ * If output is stdout or test mode is active, check that `--rm` disabled. + * + * If there is just 1 file to process, zstd will proceed as usual. + * If each file get processed into its own separate destination file, proceed as usual. + * + * When multiple files are processed into a single output, + * display a warning message, then disable --rm if it's set. + * + * If -f is specified or if output is stdout, just proceed. + * If output is set with -o, prompt for confirmation. */ -static int FIO_removeMultiFilesWarning(FIO_ctx_t* const fCtx, const FIO_prefs_t* const prefs, const char* outFileName, int displayLevelCutoff) +static int FIO_multiFilesConcatWarning(const FIO_ctx_t* fCtx, FIO_prefs_t* prefs, const char* outFileName, int displayLevelCutoff) { - int error = 0; - if (fCtx->nbFilesTotal > 1 && !prefs->overwrite) { - if (g_display_prefs.displayLevel <= displayLevelCutoff) { - if (prefs->removeSrcFile) { - DISPLAYLEVEL(1, "zstd: Aborting... not deleting files and processing into dst: %s\n", outFileName); - error = 1; - } - } else { - if (!strcmp(outFileName, stdoutmark)) { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); - } else { - DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); - } - DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate the original directory tree. \n") - if (prefs->removeSrcFile) { - if (fCtx->hasStdoutOutput) { - DISPLAYLEVEL(1, "Aborting. Use -f if you really want to delete the files and output to stdout\n"); - error = 1; - } else { - error = g_display_prefs.displayLevel > displayLevelCutoff && UTIL_requireUserConfirmation("This is a destructive operation. Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); - } - } - } + if (fCtx->hasStdoutOutput) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happen */ + EXM_THROW(43, "It's not allowed to remove input files when processed output is piped to stdout. " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + } + if (prefs->testMode) { + if (prefs->removeSrcFile) + /* this should not happen ; hard fail, to protect user's data + * note: this should rather be an assert(), but we want to be certain that user's data will not be wiped out in case it nonetheless happen */ + EXM_THROW(43, "Test mode shall not remove input files! " + "This scenario is not supposed to be possible. " + "This is a programming error. File an issue for it to be fixed."); + return 0; } - return error; + + if (fCtx->nbFilesTotal == 1) return 0; + assert(fCtx->nbFilesTotal > 1); + + if (!outFileName) return 0; + + if (fCtx->hasStdoutOutput) { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into stdout. \n"); + } else { + DISPLAYLEVEL(2, "zstd: WARNING: all input files will be processed and concatenated into a single output file: %s \n", outFileName); + } + DISPLAYLEVEL(2, "The concatenated output CANNOT regenerate original file names nor directory structure. \n") + + /* multi-input into single output : --rm is not allowed */ + if (prefs->removeSrcFile) { + DISPLAYLEVEL(2, "Since it's a destructive operation, input files will not be removed. 
\n"); + prefs->removeSrcFile = 0; + } + + if (fCtx->hasStdoutOutput) return 0; + if (prefs->overwrite) return 0; + + /* multiple files concatenated into single destination file using -o without -f */ + if (g_display_prefs.displayLevel <= displayLevelCutoff) { + /* quiet mode => no prompt => fail automatically */ + DISPLAYLEVEL(1, "Concatenating multiple processed inputs into a single output loses file metadata. \n"); + DISPLAYLEVEL(1, "Aborting. \n"); + return 1; + } + /* normal mode => prompt */ + return UTIL_requireUserConfirmation("Proceed? (y/n): ", "Aborting...", "yY", fCtx->hasStdinInput); +} + +static ZSTD_inBuffer setInBuffer(const void* buf, size_t s, size_t pos) +{ + ZSTD_inBuffer i; + i.src = buf; + i.size = s; + i.pos = pos; + return i; +} + +static ZSTD_outBuffer setOutBuffer(void* buf, size_t s, size_t pos) +{ + ZSTD_outBuffer o; + o.dst = buf; + o.size = s; + o.pos = pos; + return o; } #ifndef ZSTD_NOCOMPRESS @@ -932,16 +1065,12 @@ * Compression ************************************************************************/ typedef struct { - FILE* srcFile; - FILE* dstFile; - void* srcBuffer; - size_t srcBufferSize; - void* dstBuffer; - size_t dstBufferSize; - void* dictBuffer; - size_t dictBufferSize; + FIO_Dict_t dict; const char* dictFileName; + stat_t dictFileStat; ZSTD_CStream* cctx; + WritePoolCtx_t *writeCtx; + ReadPoolCtx_t *readCtx; } cRess_t; /** ZSTD_cycleLog() : @@ -973,8 +1102,8 @@ if (cParams.strategy >= ZSTD_btopt) { DISPLAYLEVEL(1, "[Optimal parser notes] Consider the following to improve patch size at the cost of speed:\n"); DISPLAYLEVEL(1, "- Use --single-thread mode in the zstd cli\n"); - DISPLAYLEVEL(1, "- Set a larger targetLength (eg. --zstd=targetLength=4096)\n"); - DISPLAYLEVEL(1, "- Set a larger chainLog (eg. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); + DISPLAYLEVEL(1, "- Set a larger targetLength (e.g. --zstd=targetLength=4096)\n"); + DISPLAYLEVEL(1, "- Set a larger chainLog (e.g. --zstd=chainLog=%u)\n", ZSTD_CHAINLOG_MAX); DISPLAYLEVEL(1, "Also consider playing around with searchLog and hashLog\n"); } } @@ -982,6 +1111,9 @@ static cRess_t FIO_createCResources(FIO_prefs_t* const prefs, const char* dictFileName, unsigned long long const maxSrcFileSize, int cLevel, ZSTD_compressionParameters comprParams) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + FIO_dictBufferType_t dictBufferType; cRess_t ress; memset(&ress, 0, sizeof(ress)); @@ -990,23 +1122,26 @@ if (ress.cctx == NULL) EXM_THROW(30, "allocation error (%s): can't create ZSTD_CCtx", strerror(errno)); - ress.srcBufferSize = ZSTD_CStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_CStreamOutSize(); + + FIO_getDictFileStat(dictFileName, &ress.dictFileStat); /* need to update memLimit before calling createDictBuffer * because of memLimit check inside it */ if (prefs->patchFromMode) { + U64 const dictSize = UTIL_getFileSizeStat(&ress.dictFileStat); unsigned long long const ssSize = (unsigned long long)prefs->streamSrcSize; - FIO_adjustParamsForPatchFromMode(prefs, &comprParams, UTIL_getFileSize(dictFileName), ssSize > 0 ? ssSize : maxSrcFileSize, cLevel); + useMMap |= dictSize > prefs->memLimit; + FIO_adjustParamsForPatchFromMode(prefs, &comprParams, dictSize, ssSize > 0 ? 
ssSize : maxSrcFileSize, cLevel); } - ress.dstBuffer = malloc(ress.dstBufferSize); - ress.dictBufferSize = FIO_createDictBuffer(&ress.dictBuffer, dictFileName, prefs); /* works with dictFileName==NULL */ - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(31, "allocation error : not enough memory"); + + dictBufferType = (useMMap && !forceNoUseMMap) ? FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &ress.dictFileStat, dictBufferType); /* works with dictFileName==NULL */ + + ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_CStreamOutSize()); + ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_CStreamInSize()); /* Advanced parameters, including dictionary */ - if (dictFileName && (ress.dictBuffer==NULL)) + if (dictFileName && (ress.dict.dictBuffer==NULL)) EXM_THROW(32, "allocation error : can't create dictBuffer"); ress.dictFileName = dictFileName; @@ -1056,19 +1191,19 @@ #endif /* dictionary */ if (prefs->patchFromMode) { - CHECK( ZSTD_CCtx_refPrefix(ress.cctx, ress.dictBuffer, ress.dictBufferSize) ); + CHECK( ZSTD_CCtx_refPrefix(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); } else { - CHECK( ZSTD_CCtx_loadDictionary(ress.cctx, ress.dictBuffer, ress.dictBufferSize) ); + CHECK( ZSTD_CCtx_loadDictionary_byReference(ress.cctx, ress.dict.dictBuffer, ress.dict.dictBufferSize) ); } return ress; } -static void FIO_freeCResources(const cRess_t* const ress) +static void FIO_freeCResources(cRess_t* const ress) { - free(ress->srcBuffer); - free(ress->dstBuffer); - free(ress->dictBuffer); + FIO_freeDict(&(ress->dict)); + AIO_WritePool_free(ress->writeCtx); + AIO_ReadPool_free(ress->readCtx); ZSTD_freeCStream(ress->cctx); /* never fails */ } @@ -1081,6 +1216,7 @@ { unsigned long long inFileSize = 0, outFileSize = 0; z_stream strm; + IOJob_t *writeJob = NULL; if (compressionLevel > Z_BEST_COMPRESSION) compressionLevel = Z_BEST_COMPRESSION; @@ -1091,56 +1227,65 @@ { int const ret = deflateInit2(&strm, compressionLevel, Z_DEFLATED, 15 /* maxWindowLogSize */ + 16 /* gzip only */, - 8, Z_DEFAULT_STRATEGY); /* see http://www.zlib.net/manual.html */ + 8, Z_DEFAULT_STRATEGY); /* see https://www.zlib.net/manual.html */ if (ret != Z_OK) { EXM_THROW(71, "zstd: %s: deflateInit2 error %d \n", srcFileName, ret); } } + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; while (1) { int ret; if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) break; - inFileSize += inSize; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)inSize; + AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); + if (ress->readCtx->srcBufferLoaded == 0) break; + inFileSize += ress->readCtx->srcBufferLoaded; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + } + + { + size_t const availBefore = strm.avail_in; + ret = deflate(&strm, Z_NO_FLUSH); + AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); } - ret = deflate(&strm, Z_NO_FLUSH); + if (ret != Z_OK) EXM_THROW(72, "zstd: %s: deflate error %d \n", srcFileName, ret); - { size_t const cSize = ress->dstBufferSize - strm.avail_out; + { size_t const cSize = writeJob->bufferSize - strm.avail_out; if (cSize) { - if 
(fwrite(ress->dstBuffer, 1, cSize, ress->dstFile) != cSize) - EXM_THROW(73, "Write error : cannot write to output file : %s ", strerror(errno)); + writeJob->usedBufferSize = cSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += cSize; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } } + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%% ", - (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + DISPLAYUPDATE_PROGRESS( + "\rRead : %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), + (double)outFileSize/(double)inFileSize*100) } else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%% ", - (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + DISPLAYUPDATE_PROGRESS( + "\rRead : %u / %u MB ==> %.2f%% ", + (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), + (double)outFileSize/(double)inFileSize*100); } } while (1) { int const ret = deflate(&strm, Z_FINISH); - { size_t const cSize = ress->dstBufferSize - strm.avail_out; + { size_t const cSize = writeJob->bufferSize - strm.avail_out; if (cSize) { - if (fwrite(ress->dstBuffer, 1, cSize, ress->dstFile) != cSize) - EXM_THROW(75, "Write error : %s ", strerror(errno)); + writeJob->usedBufferSize = cSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += cSize; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - } } + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + } } if (ret == Z_STREAM_END) break; if (ret != Z_BUF_ERROR) EXM_THROW(77, "zstd: %s: deflate error %d \n", srcFileName, ret); @@ -1151,6 +1296,8 @@ EXM_THROW(79, "zstd: %s: deflateEnd error %d \n", srcFileName, ret); } } *readsize = inFileSize; + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return outFileSize; } #endif @@ -1166,6 +1313,7 @@ lzma_stream strm = LZMA_STREAM_INIT; lzma_action action = LZMA_RUN; lzma_ret ret; + IOJob_t *writeJob = NULL; if (compressionLevel < 0) compressionLevel = 0; if (compressionLevel > 9) compressionLevel = 9; @@ -1183,46 +1331,55 @@ EXM_THROW(83, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret); } + writeJob =AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; strm.next_in = 0; strm.avail_in = 0; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; while (1) { if (strm.avail_in == 0) { - size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile); - if (inSize == 0) action = LZMA_FINISH; + size_t const inSize = AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_CStreamInSize()); + if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; inFileSize += inSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = inSize; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; + } + + { + size_t const availBefore = strm.avail_in; + ret = lzma_code(&strm, action); + AIO_ReadPool_consumeBytes(ress->readCtx, availBefore - strm.avail_in); } - ret = lzma_code(&strm, action); if (ret != LZMA_OK && ret != LZMA_STREAM_END) EXM_THROW(84, "zstd: %s: lzma_code encoding error %d", srcFileName, ret); - { size_t const compBytes = ress->dstBufferSize - strm.avail_out; + { size_t 
const compBytes = writeJob->bufferSize - strm.avail_out; if (compBytes) { - if (fwrite(ress->dstBuffer, 1, compBytes, ress->dstFile) != compBytes) - EXM_THROW(85, "Write error : %s", strerror(errno)); + writeJob->usedBufferSize = compBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += compBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; } } if (srcFileSize == UTIL_FILESIZE_UNKNOWN) - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) else - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); if (ret == LZMA_STREAM_END) break; } lzma_end(&strm); *readsize = inFileSize; + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); + return outFileSize; } #endif @@ -1248,15 +1405,18 @@ LZ4F_preferences_t prefs; LZ4F_compressionContext_t ctx; + IOJob_t* writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + LZ4F_errorCode_t const errorCode = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION); if (LZ4F_isError(errorCode)) EXM_THROW(31, "zstd: failed to create lz4 compression context"); memset(&prefs, 0, sizeof(prefs)); - assert(blockSize <= ress->srcBufferSize); + assert(blockSize <= ress->readCtx->base.jobBufferSize); - prefs.autoFlush = 1; + /* autoflush off to mitigate a bug in lz4<=1.9.3 for compression level 12 */ + prefs.autoFlush = 0; prefs.compressionLevel = compressionLevel; prefs.frameInfo.blockMode = LZ4F_blockLinked; prefs.frameInfo.blockSizeID = LZ4F_max64KB; @@ -1264,75 +1424,68 @@ #if LZ4_VERSION_NUMBER >= 10600 prefs.frameInfo.contentSize = (srcFileSize==UTIL_FILESIZE_UNKNOWN) ? 
0 : srcFileSize; #endif - assert(LZ4F_compressBound(blockSize, &prefs) <= ress->dstBufferSize); + assert(LZ4F_compressBound(blockSize, &prefs) <= writeJob->bufferSize); { - size_t readSize; - size_t headerSize = LZ4F_compressBegin(ctx, ress->dstBuffer, ress->dstBufferSize, &prefs); + size_t headerSize = LZ4F_compressBegin(ctx, writeJob->buffer, writeJob->bufferSize, &prefs); if (LZ4F_isError(headerSize)) EXM_THROW(33, "File header generation failed : %s", LZ4F_getErrorName(headerSize)); - if (fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile) != headerSize) - EXM_THROW(34, "Write error : %s (cannot write header)", strerror(errno)); + writeJob->usedBufferSize = headerSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += headerSize; /* Read first block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); /* Main Loop */ - while (readSize>0) { - size_t const outSize = LZ4F_compressUpdate(ctx, - ress->dstBuffer, ress->dstBufferSize, - ress->srcBuffer, readSize, NULL); + while (ress->readCtx->srcBufferLoaded) { + size_t inSize = MIN(blockSize, ress->readCtx->srcBufferLoaded); + size_t const outSize = LZ4F_compressUpdate(ctx, writeJob->buffer, writeJob->bufferSize, + ress->readCtx->srcBuffer, inSize, NULL); if (LZ4F_isError(outSize)) EXM_THROW(35, "zstd: %s: lz4 compression failed : %s", srcFileName, LZ4F_getErrorName(outSize)); outFileSize += outSize; if (srcFileSize == UTIL_FILESIZE_UNKNOWN) { - DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), - (double)outFileSize/inFileSize*100) + (double)outFileSize/(double)inFileSize*100) } else { - DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", + DISPLAYUPDATE_PROGRESS("\rRead : %u / %u MB ==> %.2f%%", (unsigned)(inFileSize>>20), (unsigned)(srcFileSize>>20), - (double)outFileSize/inFileSize*100); + (double)outFileSize/(double)inFileSize*100); } /* Write Block */ - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, outSize, ress->dstFile); - if (sizeCheck != outSize) - EXM_THROW(36, "Write error : %s", strerror(errno)); - } + writeJob->usedBufferSize = outSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); /* Read next block */ - readSize = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile); - inFileSize += readSize; + AIO_ReadPool_consumeBytes(ress->readCtx, inSize); + inFileSize += AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); } - if (ferror(ress->srcFile)) EXM_THROW(37, "Error reading %s ", srcFileName); /* End of Stream mark */ - headerSize = LZ4F_compressEnd(ctx, ress->dstBuffer, ress->dstBufferSize, NULL); + headerSize = LZ4F_compressEnd(ctx, writeJob->buffer, writeJob->bufferSize, NULL); if (LZ4F_isError(headerSize)) EXM_THROW(38, "zstd: %s: lz4 end of file generation failed : %s", srcFileName, LZ4F_getErrorName(headerSize)); - { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile); - if (sizeCheck != headerSize) - EXM_THROW(39, "Write error : %s (cannot write end of stream)", - strerror(errno)); - } + writeJob->usedBufferSize = headerSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += headerSize; } *readsize = inFileSize; LZ4F_freeCompressionContext(ctx); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return outFileSize; } #endif - static unsigned long long FIO_compressZstdFrame(FIO_ctx_t* 
const fCtx, FIO_prefs_t* const prefs, @@ -1341,8 +1494,8 @@ int compressionLevel, U64* readsize) { cRess_t const ress = *ressPtr; - FILE* const srcFile = ress.srcFile; - FILE* const dstFile = ress.dstFile; + IOJob_t *writeJob = AIO_WritePool_acquireJob(ressPtr->writeCtx); + U64 compressedfilesize = 0; ZSTD_EndDirective directive = ZSTD_e_continue; U64 pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; @@ -1356,6 +1509,9 @@ unsigned inputPresented = 0; unsigned inputBlocked = 0; unsigned lastJobID = 0; + UTIL_time_t lastAdaptTime = UTIL_getTime(); + U64 const adaptEveryMicro = REFRESH_RATE; + UTIL_HumanReadableSize_t const file_hrs = UTIL_makeHumanReadableSize(fileSize); DISPLAYLEVEL(6, "compression using zstd format \n"); @@ -1375,8 +1531,13 @@ UTIL_HumanReadableSize_t windowSize; CHECK(ZSTD_CCtx_getParameter(ress.cctx, ZSTD_c_windowLog, &windowLog)); if (windowLog == 0) { - const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); - windowLog = cParams.windowLog; + if (prefs->ldmFlag) { + /* If long mode is set without a window size libzstd will set this size internally */ + windowLog = ZSTD_WINDOWLOG_LIMIT_DEFAULT; + } else { + const ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, fileSize, 0); + windowLog = (int)cParams.windowLog; + } } windowSize = UTIL_makeHumanReadableSize(MAX(1ULL, MIN(1ULL << windowLog, pledgedSrcSize))); DISPLAYLEVEL(4, "Decompression will require %.*f%s of memory\n", windowSize.precision, windowSize.value, windowSize.suffix); @@ -1387,12 +1548,12 @@ do { size_t stillToFlush; /* Fill input Buffer */ - size_t const inSize = fread(ress.srcBuffer, (size_t)1, ress.srcBufferSize, srcFile); - ZSTD_inBuffer inBuff = { ress.srcBuffer, inSize, 0 }; + size_t const inSize = AIO_ReadPool_fillBuffer(ress.readCtx, ZSTD_CStreamInSize()); + ZSTD_inBuffer inBuff = setInBuffer( ress.readCtx->srcBuffer, ress.readCtx->srcBufferLoaded, 0 ); DISPLAYLEVEL(6, "fread %u bytes from source \n", (unsigned)inSize); *readsize += inSize; - if ((inSize == 0) || (*readsize == fileSize)) + if ((ress.readCtx->srcBufferLoaded == 0) || (*readsize == fileSize)) directive = ZSTD_e_end; stillToFlush = 1; @@ -1400,9 +1561,10 @@ || (directive == ZSTD_e_end && stillToFlush != 0) ) { size_t const oldIPos = inBuff.pos; - ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 }; + ZSTD_outBuffer outBuff = setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const toFlushNow = ZSTD_toFlushNow(ress.cctx); CHECK_V(stillToFlush, ZSTD_compressStream2(ress.cctx, &outBuff, &inBuff, directive)); + AIO_ReadPool_consumeBytes(ress.readCtx, inBuff.pos - oldIPos); /* count stats */ inputPresented++; @@ -1411,151 +1573,155 @@ /* Write compressed stream */ DISPLAYLEVEL(6, "ZSTD_compress_generic(end:%u) => input pos(%u)<=(%u)size ; output generated %u bytes \n", - (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); + (unsigned)directive, (unsigned)inBuff.pos, (unsigned)inBuff.size, (unsigned)outBuff.pos); if (outBuff.pos) { - size_t const sizeCheck = fwrite(ress.dstBuffer, 1, outBuff.pos, dstFile); - if (sizeCheck != outBuff.pos) - EXM_THROW(25, "Write error : %s (cannot write compressed block)", - strerror(errno)); + writeJob->usedBufferSize = outBuff.pos; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); compressedfilesize += outBuff.pos; } - /* display notification; and adapt compression level */ - if (READY_FOR_UPDATE()) { + /* adaptive mode : statistics measurement and speed correction */ + if 
(prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) { + ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); + + lastAdaptTime = UTIL_getTime(); + + /* check output speed */ + if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ + + unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; + assert(zfp.produced >= previous_zfp_update.produced); + assert(prefs->nbWorkers >= 1); + + /* test if compression is blocked + * either because output is slow and all buffers are full + * or because input is slow and no job can start while waiting for at least one buffer to be filled. + * note : exclude starting part, since currentJobID > 1 */ + if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ + && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ + ) { + DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") + speedChange = slower; + } + + previous_zfp_update = zfp; + + if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ + && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ + ) { + DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); + speedChange = slower; + } + flushWaiting = 0; + } + + /* course correct only if there is at least one new job completed */ + if (zfp.currentJobID > lastJobID) { + DISPLAYLEVEL(6, "compression level adaptation check \n") + + /* check input speed */ + if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ + if (inputBlocked <= 0) { + DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); + speedChange = slower; + } else if (speedChange == noChange) { + unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; + unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; + unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; + unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; + previous_zfp_correction = zfp; + assert(inputPresented > 0); + DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", + inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, + (unsigned)newlyIngested, (unsigned)newlyConsumed, + (unsigned)newlyFlushed, (unsigned)newlyProduced); + if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full : compression or output too slow */ + && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ + && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ + ) { + DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", + newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); + speedChange = faster; + } + } + inputBlocked = 0; + inputPresented = 0; + } + + if (speedChange == slower) { + DISPLAYLEVEL(6, "slower speed , higher 
compression \n") + compressionLevel ++; + if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); + if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; + compressionLevel += (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); + } + if (speedChange == faster) { + DISPLAYLEVEL(6, "faster speed , lighter compression \n") + compressionLevel --; + if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; + compressionLevel -= (compressionLevel == 0); /* skip 0 */ + ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); + } + speedChange = noChange; + + lastJobID = zfp.currentJobID; + } /* if (zfp.currentJobID > lastJobID) */ + } /* if (prefs->adaptiveMode && UTIL_clockSpanMicro(lastAdaptTime) > adaptEveryMicro) */ + + /* display notification */ + if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) { ZSTD_frameProgression const zfp = ZSTD_getFrameProgression(ress.cctx); double const cShare = (double)zfp.produced / (double)(zfp.consumed + !zfp.consumed/*avoid div0*/) * 100; UTIL_HumanReadableSize_t const buffered_hrs = UTIL_makeHumanReadableSize(zfp.ingested - zfp.consumed); UTIL_HumanReadableSize_t const consumed_hrs = UTIL_makeHumanReadableSize(zfp.consumed); UTIL_HumanReadableSize_t const produced_hrs = UTIL_makeHumanReadableSize(zfp.produced); + DELAY_NEXT_UPDATE(); + /* display progress notifications */ + DISPLAY_PROGRESS("\r%79s\r", ""); /* Clear out the current displayed line */ if (g_display_prefs.displayLevel >= 3) { - DISPLAYUPDATE(3, "\r(L%i) Buffered :%6.*f%4s - Consumed :%6.*f%4s - Compressed :%6.*f%4s => %.2f%% ", - compressionLevel, - buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, - consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, - produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, - cShare ); - } else if (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) { + /* Verbose progress update */ + DISPLAY_PROGRESS( + "(L%i) Buffered:%5.*f%s - Consumed:%5.*f%s - Compressed:%5.*f%s => %.2f%% ", + compressionLevel, + buffered_hrs.precision, buffered_hrs.value, buffered_hrs.suffix, + consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix, + produced_hrs.precision, produced_hrs.value, produced_hrs.suffix, + cShare ); + } else { /* Require level 2 or forcibly displayed progress counter for summarized updates */ - DISPLAYLEVEL(1, "\r%79s\r", ""); /* Clear out the current displayed line */ if (fCtx->nbFilesTotal > 1) { size_t srcFileNameSize = strlen(srcFileName); /* Ensure that the string we print is roughly the same size each time */ if (srcFileNameSize > 18) { const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: ...%s ", + DISPLAY_PROGRESS("Compress: %u/%u files. Current: ...%s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName); } else { - DISPLAYLEVEL(1, "Compress: %u/%u files. Current: %*s ", + DISPLAY_PROGRESS("Compress: %u/%u files. 
Current: %*s ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, (int)(18-srcFileNameSize), srcFileName); } } - DISPLAYLEVEL(1, "Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); + DISPLAY_PROGRESS("Read:%6.*f%4s ", consumed_hrs.precision, consumed_hrs.value, consumed_hrs.suffix); if (fileSize != UTIL_FILESIZE_UNKNOWN) - DISPLAYLEVEL(2, "/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); - DISPLAYLEVEL(1, " ==> %2.f%%", cShare); - DELAY_NEXT_UPDATE(); + DISPLAY_PROGRESS("/%6.*f%4s", file_hrs.precision, file_hrs.value, file_hrs.suffix); + DISPLAY_PROGRESS(" ==> %2.f%%", cShare); } - - /* adaptive mode : statistics measurement and speed correction */ - if (prefs->adaptiveMode) { - - /* check output speed */ - if (zfp.currentJobID > 1) { /* only possible if nbWorkers >= 1 */ - - unsigned long long newlyProduced = zfp.produced - previous_zfp_update.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_update.flushed; - assert(zfp.produced >= previous_zfp_update.produced); - assert(prefs->nbWorkers >= 1); - - /* test if compression is blocked - * either because output is slow and all buffers are full - * or because input is slow and no job can start while waiting for at least one buffer to be filled. - * note : exclude starting part, since currentJobID > 1 */ - if ( (zfp.consumed == previous_zfp_update.consumed) /* no data compressed : no data available, or no more buffer to compress to, OR compression is really slow (compression of a single block is slower than update rate)*/ - && (zfp.nbActiveWorkers == 0) /* confirmed : no compression ongoing */ - ) { - DISPLAYLEVEL(6, "all buffers full : compression stopped => slow down \n") - speedChange = slower; - } - - previous_zfp_update = zfp; - - if ( (newlyProduced > (newlyFlushed * 9 / 8)) /* compression produces more data than output can flush (though production can be spiky, due to work unit : (N==4)*block sizes) */ - && (flushWaiting == 0) /* flush speed was never slowed by lack of production, so it's operating at max capacity */ - ) { - DISPLAYLEVEL(6, "compression faster than flush (%llu > %llu), and flushed was never slowed down by lack of production => slow down \n", newlyProduced, newlyFlushed); - speedChange = slower; - } - flushWaiting = 0; - } - - /* course correct only if there is at least one new job completed */ - if (zfp.currentJobID > lastJobID) { - DISPLAYLEVEL(6, "compression level adaptation check \n") - - /* check input speed */ - if (zfp.currentJobID > (unsigned)(prefs->nbWorkers+1)) { /* warm up period, to fill all workers */ - if (inputBlocked <= 0) { - DISPLAYLEVEL(6, "input is never blocked => input is slower than ingestion \n"); - speedChange = slower; - } else if (speedChange == noChange) { - unsigned long long newlyIngested = zfp.ingested - previous_zfp_correction.ingested; - unsigned long long newlyConsumed = zfp.consumed - previous_zfp_correction.consumed; - unsigned long long newlyProduced = zfp.produced - previous_zfp_correction.produced; - unsigned long long newlyFlushed = zfp.flushed - previous_zfp_correction.flushed; - previous_zfp_correction = zfp; - assert(inputPresented > 0); - DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n", - inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100, - (unsigned)newlyIngested, (unsigned)newlyConsumed, - (unsigned)newlyFlushed, (unsigned)newlyProduced); - if ( (inputBlocked > inputPresented / 8) /* input is waiting often, because input buffers is full 
: compression or output too slow */ - && (newlyFlushed * 33 / 32 > newlyProduced) /* flush everything that is produced */ - && (newlyIngested * 33 / 32 > newlyConsumed) /* input speed as fast or faster than compression speed */ - ) { - DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n", - newlyIngested, newlyConsumed, newlyProduced, newlyFlushed); - speedChange = faster; - } - } - inputBlocked = 0; - inputPresented = 0; - } - - if (speedChange == slower) { - DISPLAYLEVEL(6, "slower speed , higher compression \n") - compressionLevel ++; - if (compressionLevel > ZSTD_maxCLevel()) compressionLevel = ZSTD_maxCLevel(); - if (compressionLevel > prefs->maxAdaptLevel) compressionLevel = prefs->maxAdaptLevel; - compressionLevel += (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - if (speedChange == faster) { - DISPLAYLEVEL(6, "faster speed , lighter compression \n") - compressionLevel --; - if (compressionLevel < prefs->minAdaptLevel) compressionLevel = prefs->minAdaptLevel; - compressionLevel -= (compressionLevel == 0); /* skip 0 */ - ZSTD_CCtx_setParameter(ress.cctx, ZSTD_c_compressionLevel, compressionLevel); - } - speedChange = noChange; - - lastJobID = zfp.currentJobID; - } /* if (zfp.currentJobID > lastJobID) */ - } /* if (g_adaptiveMode) */ - } /* if (READY_FOR_UPDATE()) */ + } /* if (SHOULD_DISPLAY_PROGRESS() && READY_FOR_UPDATE()) */ } /* while ((inBuff.pos != inBuff.size) */ } while (directive != ZSTD_e_end); - if (ferror(srcFile)) { - EXM_THROW(26, "Read error : I/O error"); - } if (fileSize != UTIL_FILESIZE_UNKNOWN && *readsize != fileSize) { EXM_THROW(27, "Read error : Incomplete read : %llu / %llu B", (unsigned long long)*readsize, (unsigned long long)fileSize); } + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ressPtr->writeCtx); + return compressedfilesize; } @@ -1620,20 +1786,18 @@ /* Status */ fCtx->totalBytesInput += (size_t)readsize; fCtx->totalBytesOutput += (size_t)compressedfilesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - if (g_display_prefs.displayLevel >= 2 && - !fCtx->hasStdoutOutput && - (g_display_prefs.displayLevel >= 3 || fCtx->nbFilesTotal <= 1)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) readsize); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) compressedfilesize); if (readsize == 0) { - DISPLAYLEVEL(2,"%-20s : (%6.*f%4s => %6.*f%4s, %s) \n", + DISPLAY_SUMMARY("%-20s : (%6.*f%s => %6.*f%s, %s) \n", srcFileName, hr_isize.precision, hr_isize.value, hr_isize.suffix, hr_osize.precision, hr_osize.value, hr_osize.suffix, dstFileName); } else { - DISPLAYLEVEL(2,"%-20s :%6.2f%% (%6.*f%4s => %6.*f%4s, %s) \n", + DISPLAY_SUMMARY("%-20s :%6.2f%% (%6.*f%s => %6.*f%s, %s) \n", srcFileName, (double)compressedfilesize / (double)readsize * 100, hr_isize.precision, hr_isize.value, hr_isize.suffix, @@ -1656,7 +1820,7 @@ /*! FIO_compressFilename_dstFile() : - * open dstFileName, or pass-through if ress.dstFile != NULL, + * open dstFileName, or pass-through if ress.file != NULL, * then start compression with FIO_compressFilename_internal(). * Manages source removal (--rm) and file permissions transfer. 
* note : ress.srcFile must be != NULL, @@ -1669,27 +1833,31 @@ cRess_t ress, const char* dstFileName, const char* srcFileName, + const stat_t* srcFileStat, int compressionLevel) { int closeDstFile = 0; int result; - stat_t statbuf; - int transferMTime = 0; - assert(ress.srcFile != NULL); - if (ress.dstFile == NULL) { - int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; + int transferStat = 0; + FILE *dstFile; + int dstFd = -1; + + assert(AIO_ReadPool_getFile(ress.readCtx) != NULL); + if (AIO_WritePool_getFile(ress.writeCtx) == NULL) { + int dstFileInitialPermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp (srcFileName, stdinmark) && strcmp (dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFileInitialPermissions = TEMPORARY_FILE_PERMISSIONS; } closeDstFile = 1; DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: opening dst: %s \n", dstFileName); - ress.dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); - if (ress.dstFile==NULL) return 1; /* could not open dstFileName */ + dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFileInitialPermissions); + if (dstFile==NULL) return 1; /* could not open dstFileName */ + dstFd = fileno(dstFile); + AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. * Otherwise we may delete the destination file if it already exists, * and the user presses Ctrl-C when asked if they wish to overwrite. @@ -1700,19 +1868,22 @@ result = FIO_compressFilename_internal(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); if (closeDstFile) { - FILE* const dstFile = ress.dstFile; - ress.dstFile = NULL; - clearHandler(); + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + DISPLAYLEVEL(6, "FIO_compressFilename_dstFile: closing dst: %s \n", dstFileName); - if (fclose(dstFile)) { /* error closing dstFile */ + if (AIO_WritePool_closeFile(ress.writeCtx)) { /* error closing file */ DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result=1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); } + if ( (result != 0) /* operation failure */ && strcmp(dstFileName, stdoutmark) /* special case : don't remove() stdout */ ) { @@ -1752,18 +1923,27 @@ int compressionLevel) { int result; + FILE* srcFile; + stat_t srcFileStat; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; DISPLAYLEVEL(6, "FIO_compressFilename_srcFile: %s \n", srcFileName); - /* ensure src is not a directory */ - if (UTIL_isDirectory(srcFileName)) { - DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); - return 1; - } + if (strcmp(srcFileName, stdinmark)) { + if (UTIL_stat(srcFileName, &srcFileStat)) { + /* failure to stat at all is handled during opening */ - /* ensure src is not the same as dict (if present) */ - if (ress.dictFileName != NULL && UTIL_isSameFile(srcFileName, ress.dictFileName)) { - DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); - return 1; + /* ensure src is not a directory */ + if (UTIL_isDirectoryStat(&srcFileStat)) { + DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); + return 1; + } + + /* ensure src is not the same as dict (if present) */ + if (ress.dictFileName != NULL && UTIL_isSameFileStat(srcFileName, ress.dictFileName, 
&srcFileStat, &ress.dictFileStat)) { + DISPLAYLEVEL(1, "zstd: cannot use %s as an input file and dictionary \n", srcFileName); + return 1; + } + } } /* Check if "srcFile" is compressed. Only done if --exclude-compressed flag is used @@ -1775,16 +1955,30 @@ return 0; } - ress.srcFile = FIO_openSrcFile(prefs, srcFileName); - if (ress.srcFile == NULL) return 1; /* srcFile could not be opened */ + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); + if (srcFile == NULL) return 1; /* srcFile could not be opened */ + + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } - result = FIO_compressFilename_dstFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); + AIO_ReadPool_setFile(ress.readCtx, srcFile); + result = FIO_compressFilename_dstFile( + fCtx, prefs, ress, + dstFileName, srcFileName, + &srcFileStat, compressionLevel); + AIO_ReadPool_closeFile(ress.readCtx); - fclose(ress.srcFile); - ress.srcFile = NULL; - if ( prefs->removeSrcFile /* --rm */ - && result == 0 /* success */ - && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ + if ( prefs->removeSrcFile /* --rm */ + && result == 0 /* success */ + && strcmp(srcFileName, stdinmark) /* exception : don't erase stdin */ ) { /* We must clear the handler, since after this point calling it would * delete both the source and destination files. @@ -1796,16 +1990,18 @@ return result; } -static const char* checked_index(const char* options[], size_t length, size_t index) { +static const char* +checked_index(const char* options[], size_t length, size_t index) { assert(index < length); - // Necessary to avoid warnings since -O3 will omit the above `assert` + /* Necessary to avoid warnings since -O3 will omit the above `assert` */ (void) length; return options[index]; } -#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (index)) +#define INDEX(options, index) checked_index((options), sizeof(options) / sizeof(char*), (size_t)(index)) -void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) { +void FIO_displayCompressionParameters(const FIO_prefs_t* prefs) +{ static const char* formatOptions[5] = {ZSTD_EXTENSION, GZ_EXTENSION, XZ_EXTENSION, LZMA_EXTENSION, LZ4_EXTENSION}; static const char* sparseOptions[3] = {" --no-sparse", "", " --sparse"}; @@ -1844,7 +2040,7 @@ const char* srcFileName, const char* dictFileName, int compressionLevel, ZSTD_compressionParameters comprParams) { - cRess_t const ress = FIO_createCResources(prefs, dictFileName, UTIL_getFileSize(srcFileName), compressionLevel, comprParams); + cRess_t ress = FIO_createCResources(prefs, dictFileName, UTIL_getFileSize(srcFileName), compressionLevel, comprParams); int const result = FIO_compressFilename_srcFile(fCtx, prefs, ress, dstFileName, srcFileName, compressionLevel); #define DISPLAY_LEVEL_DEFAULT 2 @@ -1866,6 +2062,11 @@ char* outDirFilename = NULL; size_t sfnSize = strlen(srcFileName); size_t const srcSuffixLen = strlen(suffix); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; + } + if (outDirName) { outDirFilename = FIO_createFilename_fromOutDir(srcFileName, outDirName, srcSuffixLen); sfnSize = strlen(outDirFilename); @@ 
-1928,23 +2129,24 @@ /* init */ assert(outFileName != NULL || suffix != NULL); if (outFileName != NULL) { /* output into a single destination (stdout typically) */ - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + FILE *dstFile; + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeCResources(&ress); return 1; } - ress.dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); - if (ress.dstFile == NULL) { /* could not open outFileName */ + dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); + if (dstFile == NULL) { /* could not open outFileName */ error = 1; } else { + AIO_WritePool_setFile(ress.writeCtx, dstFile); for (; fCtx->currFileIdx < fCtx->nbFilesTotal; ++fCtx->currFileIdx) { status = FIO_compressFilename_srcFile(fCtx, prefs, ress, outFileName, inFileNamesTable[fCtx->currFileIdx], compressionLevel); if (!status) fCtx->nbFilesProcessed++; error |= status; } - if (fclose(ress.dstFile)) + if (AIO_WritePool_closeFile(ress.writeCtx)) EXM_THROW(29, "Write error (%s) : cannot properly close %s", strerror(errno), outFileName); - ress.dstFile = NULL; } } else { if (outMirroredRootDirName) @@ -1975,16 +2177,23 @@ FIO_checkFilenameCollisions(inFileNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesInput != 0) { + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { UTIL_HumanReadableSize_t hr_isize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesInput); UTIL_HumanReadableSize_t hr_osize = UTIL_makeHumanReadableSize((U64) fCtx->totalBytesOutput); - DISPLAYLEVEL(2, "\r%79s\r", ""); - DISPLAYLEVEL(2, "%3d files compressed :%.2f%% (%6.*f%4s => %6.*f%4s)\n", - fCtx->nbFilesProcessed, - (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, - hr_isize.precision, hr_isize.value, hr_isize.suffix, - hr_osize.precision, hr_osize.value, hr_osize.suffix); + DISPLAY_PROGRESS("\r%79s\r", ""); + if (fCtx->totalBytesInput == 0) { + DISPLAY_SUMMARY("%3d files compressed : (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } else { + DISPLAY_SUMMARY("%3d files compressed : %.2f%% (%6.*f%4s => %6.*f%4s)\n", + fCtx->nbFilesProcessed, + (double)fCtx->totalBytesOutput/((double)fCtx->totalBytesInput)*100, + hr_isize.precision, hr_isize.value, hr_isize.suffix, + hr_osize.precision, hr_osize.value, hr_osize.suffix); + } } FIO_freeCResources(&ress); @@ -2001,22 +2210,27 @@ * Decompression ***************************************************************************/ typedef struct { - void* srcBuffer; - size_t srcBufferSize; - size_t srcBufferLoaded; - void* dstBuffer; - size_t dstBufferSize; + FIO_Dict_t dict; ZSTD_DStream* dctx; - FILE* dstFile; + WritePoolCtx_t *writeCtx; + ReadPoolCtx_t *readCtx; } dRess_t; static dRess_t FIO_createDResources(FIO_prefs_t* const prefs, const char* dictFileName) { + int useMMap = prefs->mmapDict == ZSTD_ps_enable; + int forceNoUseMMap = prefs->mmapDict == ZSTD_ps_disable; + stat_t statbuf; dRess_t ress; memset(&ress, 0, sizeof(ress)); - if (prefs->patchFromMode) - FIO_adjustMemLimitForPatchFromMode(prefs, UTIL_getFileSize(dictFileName), 0 /* just use the dict size */); + FIO_getDictFileStat(dictFileName, &statbuf); + + if (prefs->patchFromMode){ + U64 const dictSize = UTIL_getFileSizeStat(&statbuf); + useMMap |= dictSize > prefs->memLimit; + 
FIO_adjustMemLimitForPatchFromMode(prefs, dictSize, 0 /* just use the dict size */); + } /* Allocation */ ress.dctx = ZSTD_createDStream(); @@ -2025,159 +2239,54 @@ CHECK( ZSTD_DCtx_setMaxWindowSize(ress.dctx, prefs->memLimit) ); CHECK( ZSTD_DCtx_setParameter(ress.dctx, ZSTD_d_forceIgnoreChecksum, !prefs->checksumFlag)); - ress.srcBufferSize = ZSTD_DStreamInSize(); - ress.srcBuffer = malloc(ress.srcBufferSize); - ress.dstBufferSize = ZSTD_DStreamOutSize(); - ress.dstBuffer = malloc(ress.dstBufferSize); - if (!ress.srcBuffer || !ress.dstBuffer) - EXM_THROW(61, "Allocation error : not enough memory"); - /* dictionary */ - { void* dictBuffer; - size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName, prefs); - CHECK( ZSTD_initDStream_usingDict(ress.dctx, dictBuffer, dictBufferSize) ); - free(dictBuffer); + { + FIO_dictBufferType_t dictBufferType = (useMMap && !forceNoUseMMap) ? FIO_mmapDict : FIO_mallocDict; + FIO_initDict(&ress.dict, dictFileName, prefs, &statbuf, dictBufferType); + + CHECK(ZSTD_DCtx_reset(ress.dctx, ZSTD_reset_session_only) ); + + if (prefs->patchFromMode){ + CHECK(ZSTD_DCtx_refPrefix(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } else { + CHECK(ZSTD_DCtx_loadDictionary_byReference(ress.dctx, ress.dict.dictBuffer, ress.dict.dictBufferSize)); + } } + ress.writeCtx = AIO_WritePool_create(prefs, ZSTD_DStreamOutSize()); + ress.readCtx = AIO_ReadPool_create(prefs, ZSTD_DStreamInSize()); return ress; } static void FIO_freeDResources(dRess_t ress) { + FIO_freeDict(&(ress.dict)); CHECK( ZSTD_freeDStream(ress.dctx) ); - free(ress.srcBuffer); - free(ress.dstBuffer); + AIO_WritePool_free(ress.writeCtx); + AIO_ReadPool_free(ress.readCtx); } - -/** FIO_fwriteSparse() : -* @return : storedSkips, -* argument for next call to FIO_fwriteSparse() or FIO_fwriteSparseEnd() */ -static unsigned -FIO_fwriteSparse(FILE* file, - const void* buffer, size_t bufferSize, - const FIO_prefs_t* const prefs, - unsigned storedSkips) -{ - const size_t* const bufferT = (const size_t*)buffer; /* Buffer is supposed malloc'ed, hence aligned on size_t */ - size_t bufferSizeT = bufferSize / sizeof(size_t); - const size_t* const bufferTEnd = bufferT + bufferSizeT; - const size_t* ptrT = bufferT; - static const size_t segmentSizeT = (32 KB) / sizeof(size_t); /* check every 32 KB */ - - if (prefs->testMode) return 0; /* do not output anything in test mode */ - - if (!prefs->sparseFileSupport) { /* normal write */ - size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file); - if (sizeCheck != bufferSize) - EXM_THROW(70, "Write error : cannot write decoded block : %s", - strerror(errno)); - return 0; - } - - /* avoid int overflow */ - if (storedSkips > 1 GB) { - if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0) - EXM_THROW(91, "1 GB skip error (sparse file support)"); - storedSkips -= 1 GB; - } - - while (ptrT < bufferTEnd) { - size_t nb0T; - - /* adjust last segment if < 32 KB */ - size_t seg0SizeT = segmentSizeT; - if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT; - bufferSizeT -= seg0SizeT; - - /* count leading zeroes */ - for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ; - storedSkips += (unsigned)(nb0T * sizeof(size_t)); - - if (nb0T != seg0SizeT) { /* not all 0s */ - size_t const nbNon0ST = seg0SizeT - nb0T; - /* skip leading zeros */ - if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) - EXM_THROW(92, "Sparse skip error ; try --no-sparse"); - storedSkips = 0; - /* write the rest */ - if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST) - 
EXM_THROW(93, "Write error : cannot write decoded block : %s", - strerror(errno)); - } - ptrT += seg0SizeT; - } - - { static size_t const maskT = sizeof(size_t)-1; - if (bufferSize & maskT) { - /* size not multiple of sizeof(size_t) : implies end of block */ - const char* const restStart = (const char*)bufferTEnd; - const char* restPtr = restStart; - const char* const restEnd = (const char*)buffer + bufferSize; - assert(restEnd > restStart && restEnd < restStart + sizeof(size_t)); - for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ; - storedSkips += (unsigned) (restPtr - restStart); - if (restPtr != restEnd) { - /* not all remaining bytes are 0 */ - size_t const restSize = (size_t)(restEnd - restPtr); - if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) - EXM_THROW(92, "Sparse skip error ; try --no-sparse"); - if (fwrite(restPtr, 1, restSize, file) != restSize) - EXM_THROW(95, "Write error : cannot write end of decoded block : %s", - strerror(errno)); - storedSkips = 0; - } } } - - return storedSkips; -} - -static void -FIO_fwriteSparseEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) -{ - if (prefs->testMode) assert(storedSkips == 0); - if (storedSkips>0) { - assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ - (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ - if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) - EXM_THROW(69, "Final skip error (sparse file support)"); - /* last zero must be explicitly written, - * so that skipped ones get implicitly translated as zero by FS */ - { const char lastZeroByte[1] = { 0 }; - if (fwrite(lastZeroByte, 1, 1, file) != 1) - EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); - } } -} - - -/** FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode - @return : 0 (no error) */ -static int FIO_passThrough(const FIO_prefs_t* const prefs, - FILE* foutput, FILE* finput, - void* buffer, size_t bufferSize, - size_t alreadyLoaded) +/* FIO_passThrough() : just copy input into output, for compatibility with gzip -df mode + * @return : 0 (no error) */ +static int FIO_passThrough(dRess_t *ress) { - size_t const blockSize = MIN(64 KB, bufferSize); - size_t readFromInput; - unsigned storedSkips = 0; - - /* assumption : ress->srcBufferLoaded bytes already loaded and stored within buffer */ - { size_t const sizeCheck = fwrite(buffer, 1, alreadyLoaded, foutput); - if (sizeCheck != alreadyLoaded) { - DISPLAYLEVEL(1, "Pass-through write error : %s\n", strerror(errno)); - return 1; - } } - - do { - readFromInput = fread(buffer, 1, blockSize, finput); - storedSkips = FIO_fwriteSparse(foutput, buffer, readFromInput, prefs, storedSkips); - } while (readFromInput == blockSize); - if (ferror(finput)) { - DISPLAYLEVEL(1, "Pass-through read error : %s\n", strerror(errno)); - return 1; - } - assert(feof(finput)); - - FIO_fwriteSparseEnd(prefs, foutput, storedSkips); + size_t const blockSize = MIN(MIN(64 KB, ZSTD_DStreamInSize()), ZSTD_DStreamOutSize()); + IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + + while(ress->readCtx->srcBufferLoaded) { + size_t writeSize; + writeSize = MIN(blockSize, ress->readCtx->srcBufferLoaded); + assert(writeSize <= writeJob->bufferSize); + memcpy(writeJob->buffer, ress->readCtx->srcBuffer, writeSize); + writeJob->usedBufferSize = writeSize; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); + 
AIO_ReadPool_consumeBytes(ress->readCtx, writeSize); + AIO_ReadPool_fillBuffer(ress->readCtx, blockSize); + } + assert(ress->readCtx->reachedEof); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return 0; } @@ -2186,7 +2295,8 @@ static void FIO_zstdErrorHelp(const FIO_prefs_t* const prefs, const dRess_t* ress, - size_t err, const char* srcFileName) + size_t err, + const char* srcFileName) { ZSTD_frameHeader header; @@ -2195,7 +2305,7 @@ return; /* Try to decode the frame header */ - err = ZSTD_getFrameHeader(&header, ress->srcBuffer, ress->srcBufferLoaded); + err = ZSTD_getFrameHeader(&header, ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded); if (err == 0) { unsigned long long const windowSize = header.windowSize; unsigned const windowLog = FIO_highbit64(windowSize) + ((windowSize & (windowSize - 1)) != 0); @@ -2218,13 +2328,13 @@ */ #define FIO_ERROR_FRAME_DECODING ((unsigned long long)(-2)) static unsigned long long -FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, FILE* finput, +FIO_decompressZstdFrame(FIO_ctx_t* const fCtx, dRess_t* ress, const FIO_prefs_t* const prefs, const char* srcFileName, U64 alreadyDecoded) /* for multi-frames streams */ { U64 frameSize = 0; - U32 storedSkips = 0; + IOJob_t *writeJob = AIO_WritePool_acquireJob(ress->writeCtx); /* display last 20 characters only */ { size_t const srcFileLength = strlen(srcFileName); @@ -2234,67 +2344,60 @@ ZSTD_DCtx_reset(ress->dctx, ZSTD_reset_session_only); /* Header loading : ensures ZSTD_getFrameHeader() will succeed */ - { size_t const toDecode = ZSTD_FRAMEHEADERSIZE_MAX; - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - ress->srcBufferLoaded += fread(startPosition, 1, toRead, finput); - } } + AIO_ReadPool_fillBuffer(ress->readCtx, ZSTD_FRAMEHEADERSIZE_MAX); /* Main decompression Loop */ while (1) { - ZSTD_inBuffer inBuff = { ress->srcBuffer, ress->srcBufferLoaded, 0 }; - ZSTD_outBuffer outBuff= { ress->dstBuffer, ress->dstBufferSize, 0 }; + ZSTD_inBuffer inBuff = setInBuffer( ress->readCtx->srcBuffer, ress->readCtx->srcBufferLoaded, 0 ); + ZSTD_outBuffer outBuff= setOutBuffer( writeJob->buffer, writeJob->bufferSize, 0 ); size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff); - const int displayLevel = (g_display_prefs.progressSetting == FIO_ps_always) ? 1 : 2; UTIL_HumanReadableSize_t const hrs = UTIL_makeHumanReadableSize(alreadyDecoded+frameSize); if (ZSTD_isError(readSizeHint)) { DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n", srcFileName, ZSTD_getErrorName(readSizeHint)); FIO_zstdErrorHelp(prefs, ress, readSizeHint, srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } /* Write block */ - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, prefs, storedSkips); + writeJob->usedBufferSize = outBuff.pos; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); frameSize += outBuff.pos; if (fCtx->nbFilesTotal > 1) { size_t srcFileNameSize = strlen(srcFileName); if (srcFileNameSize > 18) { const char* truncatedSrcFileName = srcFileName + srcFileNameSize - 15; - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... ", - fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS( + "\rDecompress: %2u/%2u files. Current: ...%s : %.*f%s... 
", + fCtx->currFileIdx+1, fCtx->nbFilesTotal, truncatedSrcFileName, hrs.precision, hrs.value, hrs.suffix); } else { - DISPLAYUPDATE(displayLevel, "\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", + DISPLAYUPDATE_PROGRESS("\rDecompress: %2u/%2u files. Current: %s : %.*f%s... ", fCtx->currFileIdx+1, fCtx->nbFilesTotal, srcFileName, hrs.precision, hrs.value, hrs.suffix); } } else { - DISPLAYUPDATE(displayLevel, "\r%-20.20s : %.*f%s... ", + DISPLAYUPDATE_PROGRESS("\r%-20.20s : %.*f%s... ", srcFileName, hrs.precision, hrs.value, hrs.suffix); } - if (inBuff.pos > 0) { - memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos); - ress->srcBufferLoaded -= inBuff.pos; - } + AIO_ReadPool_consumeBytes(ress->readCtx, inBuff.pos); if (readSizeHint == 0) break; /* end of frame */ /* Fill input buffer */ - { size_t const toDecode = MIN(readSizeHint, ress->srcBufferSize); /* support large skippable frames */ - if (ress->srcBufferLoaded < toDecode) { - size_t const toRead = toDecode - ress->srcBufferLoaded; /* > 0 */ - void* const startPosition = (char*)ress->srcBuffer + ress->srcBufferLoaded; - size_t const readSize = fread(startPosition, 1, toRead, finput); + { size_t const toDecode = MIN(readSizeHint, ZSTD_DStreamInSize()); /* support large skippable frames */ + if (ress->readCtx->srcBufferLoaded < toDecode) { + size_t const readSize = AIO_ReadPool_fillBuffer(ress->readCtx, toDecode); if (readSize==0) { DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n", - srcFileName); + srcFileName); + AIO_WritePool_releaseIoJob(writeJob); return FIO_ERROR_FRAME_DECODING; } - ress->srcBufferLoaded += readSize; - } } } + } } } - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return frameSize; } @@ -2302,37 +2405,36 @@ #ifdef ZSTD_GZDECOMPRESS static unsigned long long -FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* srcFileName) +FIO_decompressGzFrame(dRess_t* ress, const char* srcFileName) { unsigned long long outFileSize = 0; z_stream strm; int flush = Z_NO_FLUSH; int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = 0; strm.avail_in = 0; - /* see http://www.zlib.net/manual.html */ + /* see https://www.zlib.net/manual.html */ if (inflateInit2(&strm, 15 /* maxWindowLogSize */ + 16 /* gzip only */) != Z_OK) return FIO_ERROR_FRAME_DECODING; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; - strm.avail_in = (uInt)ress->srcBufferLoaded; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; for ( ; ; ) { int ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) flush = Z_FINISH; - strm.next_in = (z_const unsigned char*)ress->srcBuffer; - strm.avail_in = (uInt)ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) flush = Z_FINISH; + strm.next_in = (z_const unsigned char*)ress->readCtx->srcBuffer; + strm.avail_in = (uInt)ress->readCtx->srcBufferLoaded; } ret = 
inflate(&strm, flush); if (ret == Z_BUF_ERROR) { @@ -2343,35 +2445,34 @@ DISPLAYLEVEL(1, "zstd: %s: inflate error %d \n", srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decompBytes, prefs, storedSkips); + writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (Bytef*)ress->dstBuffer; - strm.avail_out = (uInt)ress->dstBufferSize; + strm.next_out = (Bytef*)writeJob->buffer; + strm.avail_out = (uInt)writeJob->bufferSize; } } if (ret == Z_STREAM_END) break; } - if (strm.avail_in > 0) - memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); + if ( (inflateEnd(&strm) != Z_OK) /* release resources ; error detected */ && (decodingError==0) ) { DISPLAYLEVEL(1, "zstd: %s: inflateEnd error \n", srcFileName); decodingError = 1; } - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : outFileSize; } #endif - #ifdef ZSTD_LZMADECOMPRESS static unsigned long long -FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, +FIO_decompressLzmaFrame(dRess_t* ress, const char* srcFileName, int plain_lzma) { unsigned long long outFileSize = 0; @@ -2379,7 +2480,7 @@ lzma_action action = LZMA_RUN; lzma_ret initRet; int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; strm.next_in = 0; strm.avail_in = 0; @@ -2396,18 +2497,19 @@ return FIO_ERROR_FRAME_DECODING; } - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; for ( ; ; ) { lzma_ret ret; if (strm.avail_in == 0) { - ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile); - if (ress->srcBufferLoaded == 0) action = LZMA_FINISH; - strm.next_in = (BYTE const*)ress->srcBuffer; - strm.avail_in = ress->srcBufferLoaded; + AIO_ReadPool_consumeAndRefill(ress->readCtx); + if (ress->readCtx->srcBufferLoaded == 0) action = LZMA_FINISH; + strm.next_in = (BYTE const*)ress->readCtx->srcBuffer; + strm.avail_in = ress->readCtx->srcBufferLoaded; } ret = lzma_code(&strm, action); @@ -2420,104 +2522,90 @@ srcFileName, ret); decodingError = 1; break; } - { size_t const decompBytes = ress->dstBufferSize - strm.avail_out; + { size_t const decompBytes = writeJob->bufferSize - strm.avail_out; if (decompBytes) { - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decompBytes, prefs, storedSkips); + writeJob->usedBufferSize = decompBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); outFileSize += decompBytes; - strm.next_out = (BYTE*)ress->dstBuffer; - strm.avail_out = ress->dstBufferSize; + strm.next_out = (BYTE*)writeJob->buffer; + strm.avail_out = writeJob->bufferSize; } } if (ret == LZMA_STREAM_END) break; } - if (strm.avail_in > 0) - 
memmove(ress->srcBuffer, strm.next_in, strm.avail_in); - ress->srcBufferLoaded = strm.avail_in; + AIO_ReadPool_consumeBytes(ress->readCtx, ress->readCtx->srcBufferLoaded - strm.avail_in); lzma_end(&strm); - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : outFileSize; } #endif #ifdef ZSTD_LZ4DECOMPRESS static unsigned long long -FIO_decompressLz4Frame(dRess_t* ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* srcFileName) +FIO_decompressLz4Frame(dRess_t* ress, const char* srcFileName) { unsigned long long filesize = 0; - LZ4F_errorCode_t nextToLoad; + LZ4F_errorCode_t nextToLoad = 4; LZ4F_decompressionContext_t dCtx; LZ4F_errorCode_t const errorCode = LZ4F_createDecompressionContext(&dCtx, LZ4F_VERSION); int decodingError = 0; - unsigned storedSkips = 0; + IOJob_t *writeJob = NULL; if (LZ4F_isError(errorCode)) { DISPLAYLEVEL(1, "zstd: failed to create lz4 decompression context \n"); return FIO_ERROR_FRAME_DECODING; } - /* Init feed with magic number (already consumed from FILE* sFile) */ - { size_t inSize = 4; - size_t outSize= 0; - MEM_writeLE32(ress->srcBuffer, LZ4_MAGICNUMBER); - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &outSize, ress->srcBuffer, &inSize, NULL); - if (LZ4F_isError(nextToLoad)) { - DISPLAYLEVEL(1, "zstd: %s: lz4 header error : %s \n", - srcFileName, LZ4F_getErrorName(nextToLoad)); - LZ4F_freeDecompressionContext(dCtx); - return FIO_ERROR_FRAME_DECODING; - } } + writeJob = AIO_WritePool_acquireJob(ress->writeCtx); /* Main Loop */ for (;nextToLoad;) { - size_t readSize; size_t pos = 0; - size_t decodedBytes = ress->dstBufferSize; + size_t decodedBytes = writeJob->bufferSize; + int fullBufferDecoded = 0; /* Read input */ - if (nextToLoad > ress->srcBufferSize) nextToLoad = ress->srcBufferSize; - readSize = fread(ress->srcBuffer, 1, nextToLoad, srcFile); - if (!readSize) break; /* reached end of file or stream */ + AIO_ReadPool_fillBuffer(ress->readCtx, nextToLoad); + if(!ress->readCtx->srcBufferLoaded) break; /* reached end of file */ - while ((pos < readSize) || (decodedBytes == ress->dstBufferSize)) { /* still to read, or still to flush */ + while ((pos < ress->readCtx->srcBufferLoaded) || fullBufferDecoded) { /* still to read, or still to flush */ /* Decode Input (at least partially) */ - size_t remaining = readSize - pos; - decodedBytes = ress->dstBufferSize; - nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &decodedBytes, (char*)(ress->srcBuffer)+pos, &remaining, NULL); + size_t remaining = ress->readCtx->srcBufferLoaded - pos; + decodedBytes = writeJob->bufferSize; + nextToLoad = LZ4F_decompress(dCtx, writeJob->buffer, &decodedBytes, (char*)(ress->readCtx->srcBuffer)+pos, + &remaining, NULL); if (LZ4F_isError(nextToLoad)) { DISPLAYLEVEL(1, "zstd: %s: lz4 decompression error : %s \n", srcFileName, LZ4F_getErrorName(nextToLoad)); decodingError = 1; nextToLoad = 0; break; } pos += remaining; + assert(pos <= ress->readCtx->srcBufferLoaded); + fullBufferDecoded = decodedBytes == writeJob->bufferSize; /* Write Block */ if (decodedBytes) { UTIL_HumanReadableSize_t hrs; - storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, decodedBytes, prefs, storedSkips); + writeJob->usedBufferSize = decodedBytes; + AIO_WritePool_enqueueAndReacquireWriteJob(&writeJob); filesize += decodedBytes; hrs = UTIL_makeHumanReadableSize(filesize); - DISPLAYUPDATE(2, "\rDecompressed : %.*f%s ", hrs.precision, 
hrs.value, hrs.suffix); + DISPLAYUPDATE_PROGRESS("\rDecompressed : %.*f%s ", hrs.precision, hrs.value, hrs.suffix); } if (!nextToLoad) break; } + AIO_ReadPool_consumeBytes(ress->readCtx, pos); } - /* can be out because readSize == 0, which could be an fread() error */ - if (ferror(srcFile)) { - DISPLAYLEVEL(1, "zstd: %s: read error \n", srcFileName); - decodingError=1; - } - if (nextToLoad!=0) { DISPLAYLEVEL(1, "zstd: %s: unfinished lz4 stream \n", srcFileName); decodingError=1; } LZ4F_freeDecompressionContext(dCtx); - ress->srcBufferLoaded = 0; /* LZ4F will reach exact frame boundary */ - FIO_fwriteSparseEnd(prefs, ress->dstFile, storedSkips); + AIO_WritePool_releaseIoJob(writeJob); + AIO_WritePool_sparseWriteEnd(ress->writeCtx); return decodingError ? FIO_ERROR_FRAME_DECODING : filesize; } @@ -2532,23 +2620,30 @@ * 1 : error */ static int FIO_decompressFrames(FIO_ctx_t* const fCtx, - dRess_t ress, FILE* srcFile, - const FIO_prefs_t* const prefs, - const char* dstFileName, const char* srcFileName) + dRess_t ress, const FIO_prefs_t* const prefs, + const char* dstFileName, const char* srcFileName) { unsigned readSomething = 0; unsigned long long filesize = 0; - assert(srcFile != NULL); + int passThrough = prefs->passThrough; + + if (passThrough == -1) { + /* If pass-through mode is not explicitly enabled or disabled, + * default to the legacy behavior of enabling it if we are writing + * to stdout with the overwrite flag enabled. + */ + passThrough = prefs->overwrite && !strcmp(dstFileName, stdoutmark); + } + assert(passThrough == 0 || passThrough == 1); /* for each frame */ for ( ; ; ) { /* check magic number -> version */ size_t const toRead = 4; - const BYTE* const buf = (const BYTE*)ress.srcBuffer; - if (ress.srcBufferLoaded < toRead) /* load up to 4 bytes for header */ - ress.srcBufferLoaded += fread((char*)ress.srcBuffer + ress.srcBufferLoaded, - (size_t)1, toRead - ress.srcBufferLoaded, srcFile); - if (ress.srcBufferLoaded==0) { + const BYTE* buf; + AIO_ReadPool_fillBuffer(ress.readCtx, toRead); + buf = (const BYTE*)ress.readCtx->srcBuffer; + if (ress.readCtx->srcBufferLoaded==0) { if (readSomething==0) { /* srcFile is empty (which is invalid) */ DISPLAYLEVEL(1, "zstd: %s: unexpected end of file \n", srcFileName); return 1; @@ -2556,17 +2651,20 @@ break; /* no more input */ } readSomething = 1; /* there is at least 1 byte in srcFile */ - if (ress.srcBufferLoaded < toRead) { + if (ress.readCtx->srcBufferLoaded < toRead) { /* not enough input to check magic number */ + if (passThrough) { + return FIO_passThrough(&ress); + } DISPLAYLEVEL(1, "zstd: %s: unknown header \n", srcFileName); return 1; } - if (ZSTD_isFrame(buf, ress.srcBufferLoaded)) { - unsigned long long const frameSize = FIO_decompressZstdFrame(fCtx, &ress, srcFile, prefs, srcFileName, filesize); + if (ZSTD_isFrame(buf, ress.readCtx->srcBufferLoaded)) { + unsigned long long const frameSize = FIO_decompressZstdFrame(fCtx, &ress, prefs, srcFileName, filesize); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; } else if (buf[0] == 31 && buf[1] == 139) { /* gz magic number */ #ifdef ZSTD_GZDECOMPRESS - unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFile, prefs, srcFileName); + unsigned long long const frameSize = FIO_decompressGzFrame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -2576,7 +2674,7 @@ } else if ((buf[0] == 0xFD && buf[1] == 0x37) /* xz magic number */ || (buf[0] == 0x5D && buf[1] == 0x00)) { /* lzma header 
(no magic number) */ #ifdef ZSTD_LZMADECOMPRESS - unsigned long long const frameSize = FIO_decompressLzmaFrame(&ress, srcFile, prefs, srcFileName, buf[0] != 0xFD); + unsigned long long const frameSize = FIO_decompressLzmaFrame(&ress, srcFileName, buf[0] != 0xFD); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else @@ -2585,18 +2683,15 @@ #endif } else if (MEM_readLE32(buf) == LZ4_MAGICNUMBER) { #ifdef ZSTD_LZ4DECOMPRESS - unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFile, prefs, srcFileName); + unsigned long long const frameSize = FIO_decompressLz4Frame(&ress, srcFileName); if (frameSize == FIO_ERROR_FRAME_DECODING) return 1; filesize += frameSize; #else DISPLAYLEVEL(1, "zstd: %s: lz4 file cannot be uncompressed (zstd compiled without HAVE_LZ4) -- ignored \n", srcFileName); return 1; #endif - } else if ((prefs->overwrite) && !strcmp (dstFileName, stdoutmark)) { /* pass-through mode */ - return FIO_passThrough(prefs, - ress.dstFile, srcFile, - ress.srcBuffer, ress.srcBufferSize, - ress.srcBufferLoaded); + } else if (passThrough) { + return FIO_passThrough(&ress); } else { DISPLAYLEVEL(1, "zstd: %s: unsupported format \n", srcFileName); return 1; @@ -2604,48 +2699,47 @@ /* Final Status */ fCtx->totalBytesOutput += (size_t)filesize; - DISPLAYLEVEL(2, "\r%79s\r", ""); - /* No status message in pipe mode (stdin - stdout) or multi-files mode */ - if ((g_display_prefs.displayLevel >= 2 && fCtx->nbFilesTotal <= 1) || - g_display_prefs.displayLevel >= 3 || - g_display_prefs.progressSetting == FIO_ps_always) { - DISPLAYLEVEL(1, "\r%-20s: %llu bytes \n", srcFileName, filesize); - } + DISPLAY_PROGRESS("\r%79s\r", ""); + if (FIO_shouldDisplayFileSummary(fCtx)) + DISPLAY_SUMMARY("%-20s: %llu bytes \n", srcFileName, filesize); return 0; } /** FIO_decompressDstFile() : - open `dstFileName`, - or path-through if ress.dstFile is already != 0, + open `dstFileName`, or pass-through if writeCtx's file is already != 0, then start decompression process (FIO_decompressFrames()). @return : 0 : OK 1 : operation aborted */ static int FIO_decompressDstFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, - dRess_t ress, FILE* srcFile, - const char* dstFileName, const char* srcFileName) + dRess_t ress, + const char* dstFileName, + const char* srcFileName, + const stat_t* srcFileStat) { int result; - stat_t statbuf; int releaseDstFile = 0; - int transferMTime = 0; + int transferStat = 0; + int dstFd = 0; - if ((ress.dstFile == NULL) && (prefs->testMode==0)) { + if ((AIO_WritePool_getFile(ress.writeCtx) == NULL) && (prefs->testMode == 0)) { + FILE *dstFile; int dstFilePermissions = DEFAULT_FILE_PERMISSIONS; if ( strcmp(srcFileName, stdinmark) /* special case : don't transfer permissions from stdin */ && strcmp(dstFileName, stdoutmark) - && UTIL_stat(srcFileName, &statbuf) - && UTIL_isRegularFileStat(&statbuf) ) { - dstFilePermissions = statbuf.st_mode; - transferMTime = 1; + && UTIL_isRegularFileStat(srcFileStat) ) { + transferStat = 1; + dstFilePermissions = TEMPORARY_FILE_PERMISSIONS; } releaseDstFile = 1; - ress.dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); - if (ress.dstFile==NULL) return 1; + dstFile = FIO_openDstFile(fCtx, prefs, srcFileName, dstFileName, dstFilePermissions); + if (dstFile==NULL) return 1; + dstFd = fileno(dstFile); + AIO_WritePool_setFile(ress.writeCtx, dstFile); /* Must only be added after FIO_openDstFile() succeeds. 
* Otherwise we may delete the destination file if it already exists, @@ -2654,19 +2748,22 @@ addHandler(dstFileName); } - result = FIO_decompressFrames(fCtx, ress, srcFile, prefs, dstFileName, srcFileName); + result = FIO_decompressFrames(fCtx, ress, prefs, dstFileName, srcFileName); if (releaseDstFile) { - FILE* const dstFile = ress.dstFile; clearHandler(); - ress.dstFile = NULL; - if (fclose(dstFile)) { + + if (transferStat) { + UTIL_setFDStat(dstFd, dstFileName, srcFileStat); + } + + if (AIO_WritePool_closeFile(ress.writeCtx)) { DISPLAYLEVEL(1, "zstd: %s: %s \n", dstFileName, strerror(errno)); result = 1; } - if (transferMTime) { - UTIL_utime(dstFileName, &statbuf); + if (transferStat) { + UTIL_utime(dstFileName, srcFileStat); } if ( (result != 0) /* operation failure */ @@ -2688,18 +2785,34 @@ static int FIO_decompressSrcFile(FIO_ctx_t* const fCtx, FIO_prefs_t* const prefs, dRess_t ress, const char* dstFileName, const char* srcFileName) { FILE* srcFile; + stat_t srcFileStat; int result; + U64 fileSize = UTIL_FILESIZE_UNKNOWN; if (UTIL_isDirectory(srcFileName)) { DISPLAYLEVEL(1, "zstd: %s is a directory -- ignored \n", srcFileName); return 1; } - srcFile = FIO_openSrcFile(prefs, srcFileName); + srcFile = FIO_openSrcFile(prefs, srcFileName, &srcFileStat); if (srcFile==NULL) return 1; - ress.srcBufferLoaded = 0; - result = FIO_decompressDstFile(fCtx, prefs, ress, srcFile, dstFileName, srcFileName); + /* Don't use AsyncIO for small files */ + if (strcmp(srcFileName, stdinmark)) /* Stdin doesn't have stats */ + fileSize = UTIL_getFileSizeStat(&srcFileStat); + if(fileSize != UTIL_FILESIZE_UNKNOWN && fileSize < ZSTD_BLOCKSIZE_MAX * 3) { + AIO_ReadPool_setAsync(ress.readCtx, 0); + AIO_WritePool_setAsync(ress.writeCtx, 0); + } else { + AIO_ReadPool_setAsync(ress.readCtx, 1); + AIO_WritePool_setAsync(ress.writeCtx, 1); + } + + AIO_ReadPool_setFile(ress.readCtx, srcFile); + + result = FIO_decompressDstFile(fCtx, prefs, ress, dstFileName, srcFileName, &srcFileStat); + + AIO_ReadPool_setFile(ress.readCtx, NULL); /* Close file */ if (fclose(srcFile)) { @@ -2731,6 +2844,8 @@ int const decodingError = FIO_decompressSrcFile(fCtx, prefs, ress, dstFileName, srcFileName); + + FIO_freeDResources(ress); return decodingError; } @@ -2788,6 +2903,11 @@ size_t srcSuffixLen; const char* const srcSuffix = strrchr(srcFileName, '.'); + + if(!strcmp(srcFileName, stdinmark)) { + return stdoutmark; + } + if (srcSuffix == NULL) { DISPLAYLEVEL(1, "zstd: %s: unknown suffix (%s expected). 
" @@ -2869,20 +2989,21 @@ dRess_t ress = FIO_createDResources(prefs, dictFileName); if (outFileName) { - if (FIO_removeMultiFilesWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { + if (FIO_multiFilesConcatWarning(fCtx, prefs, outFileName, 1 /* displayLevelCutoff */)) { FIO_freeDResources(ress); return 1; } if (!prefs->testMode) { - ress.dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); - if (ress.dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName); + FILE* dstFile = FIO_openDstFile(fCtx, prefs, NULL, outFileName, DEFAULT_FILE_PERMISSIONS); + if (dstFile == 0) EXM_THROW(19, "cannot open %s", outFileName); + AIO_WritePool_setFile(ress.writeCtx, dstFile); } for (; fCtx->currFileIdx < fCtx->nbFilesTotal; fCtx->currFileIdx++) { status = FIO_decompressSrcFile(fCtx, prefs, ress, outFileName, srcNamesTable[fCtx->currFileIdx]); if (!status) fCtx->nbFilesProcessed++; error |= status; } - if ((!prefs->testMode) && (fclose(ress.dstFile))) + if ((!prefs->testMode) && (AIO_WritePool_closeFile(ress.writeCtx))) EXM_THROW(72, "Write error : %s : cannot properly close output file", strerror(errno)); } else { @@ -2912,8 +3033,11 @@ FIO_checkFilenameCollisions(srcNamesTable , (unsigned)fCtx->nbFilesTotal); } - if (fCtx->nbFilesProcessed >= 1 && fCtx->nbFilesTotal > 1 && fCtx->totalBytesOutput != 0) - DISPLAYLEVEL(2, "%d files decompressed : %6zu bytes total \n", fCtx->nbFilesProcessed, fCtx->totalBytesOutput); + if (FIO_shouldDisplayMultipleFileSummary(fCtx)) { + DISPLAY_PROGRESS("\r%79s\r", ""); + DISPLAY_SUMMARY("%d files decompressed : %6llu bytes total \n", + fCtx->nbFilesProcessed, (unsigned long long)fCtx->totalBytesOutput); + } FIO_freeDResources(ress); return error; @@ -2931,7 +3055,9 @@ int numSkippableFrames; int decompUnavailable; int usesCheck; + BYTE checksum[4]; U32 nbFiles; + unsigned dictID; } fileInfo_t; typedef enum { @@ -2939,7 +3065,7 @@ info_frame_error=1, info_not_zstd=2, info_file_error=3, - info_truncated_input=4, + info_truncated_input=4 } InfoError; #define ERROR_IF(c,n,...) { \ @@ -2986,6 +3112,12 @@ } ERROR_IF(ZSTD_getFrameHeader(&header, headerBuffer, numBytesRead) != 0, info_frame_error, "Error: could not decode frame header"); + if (info->dictID != 0 && info->dictID != header.dictID) { + DISPLAY("WARNING: File contains multiple frames with different dictionary IDs. 
Showing dictID 0 instead"); + info->dictID = 0; + } else { + info->dictID = header.dictID; + } info->windowSize = header.windowSize; /* move to the end of the frame header */ { size_t const headerSize = ZSTD_frameHeaderSize(headerBuffer, numBytesRead); @@ -3018,8 +3150,8 @@ int const contentChecksumFlag = (frameHeaderDescriptor & (1 << 2)) >> 2; if (contentChecksumFlag) { info->usesCheck = 1; - ERROR_IF(fseek(srcFile, 4, SEEK_CUR) != 0, - info_frame_error, "Error: could not skip past checksum"); + ERROR_IF(fread(info->checksum, 1, 4, srcFile) != 4, + info_frame_error, "Error: could not read checksum"); } } info->numActualFrames++; } @@ -3045,10 +3177,11 @@ getFileInfo_fileConfirmed(fileInfo_t* info, const char* inFileName) { InfoError status; - FILE* const srcFile = FIO_openSrcFile(NULL, inFileName); + stat_t srcFileStat; + FILE* const srcFile = FIO_openSrcFile(NULL, inFileName, &srcFileStat); ERROR_IF(srcFile == NULL, info_file_error, "Error: could not open source file %s", inFileName); - info->compressedSize = UTIL_getFileSize(inFileName); + info->compressedSize = UTIL_getFileSizeStat(&srcFileStat); status = FIO_analyzeFrames(info, srcFile); fclose(srcFile); @@ -3098,6 +3231,7 @@ DISPLAYOUT("# Zstandard Frames: %d\n", info->numActualFrames); if (info->numSkippableFrames) DISPLAYOUT("# Skippable Frames: %d\n", info->numSkippableFrames); + DISPLAYOUT("DictID: %u\n", info->dictID); DISPLAYOUT("Window Size: %.*f%s (%llu B)\n", window_hrs.precision, window_hrs.value, window_hrs.suffix, (unsigned long long)info->windowSize); @@ -3110,7 +3244,16 @@ (unsigned long long)info->decompressedSize); DISPLAYOUT("Ratio: %.4f\n", ratio); } - DISPLAYOUT("Check: %s\n", checkString); + + if (info->usesCheck && info->numActualFrames == 1) { + DISPLAYOUT("Check: %s %02x%02x%02x%02x\n", checkString, + info->checksum[3], info->checksum[2], + info->checksum[1], info->checksum[0] + ); + } else { + DISPLAYOUT("Check: %s\n", checkString); + } + DISPLAYOUT("\n"); } } @@ -3174,7 +3317,7 @@ } } if (numFiles == 0) { - if (!IS_CONSOLE(stdin)) { + if (!UTIL_isConsole(stdin)) { DISPLAYLEVEL(1, "zstd: --list does not support reading from standard input \n"); } DISPLAYLEVEL(1, "No files given \n"); diff --git a/sys/contrib/zstd/programs/fileio_asyncio.h b/sys/contrib/zstd/programs/fileio_asyncio.h new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/programs/fileio_asyncio.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + /* + * FileIO AsyncIO exposes read/write IO pools that allow doing IO asynchronously. + * Current implementation relies on having one thread that reads and one that + * writes. + * Each IO pool supports up to `MAX_IO_JOBS` that can be enqueued for work, but + * are performed serially by the appropriate worker thread. + * Most systems exposes better primitives to perform asynchronous IO, such as + * io_uring on newer linux systems. The API is built in such a way that in the + * future we could replace the threads with better solutions when available. 
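+ *
+ * As used by fileio.c in this patch, the write side is driven through AIO_WritePool_acquireJob()
+ * -> fill job->buffer and set job->usedBufferSize -> AIO_WritePool_enqueueAndReacquireWriteJob(),
+ * while the read side loops on AIO_ReadPool_fillBuffer() / AIO_ReadPool_consumeBytes() over
+ * srcBuffer. A commented usage sketch appears near the end of this header.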
+ */ + +#ifndef ZSTD_FILEIO_ASYNCIO_H +#define ZSTD_FILEIO_ASYNCIO_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../lib/common/mem.h" /* U32, U64 */ +#include "fileio_types.h" +#include "platform.h" +#include "util.h" +#include "../lib/common/pool.h" +#include "../lib/common/threading.h" + +#define MAX_IO_JOBS (10) + +typedef struct { + /* These struct fields should be set only on creation and not changed afterwards */ + POOL_ctx* threadPool; + int threadPoolActive; + int totalIoJobs; + const FIO_prefs_t* prefs; + POOL_function poolFunction; + + /* Controls the file we currently write to, make changes only by using provided utility functions */ + FILE* file; + + /* The jobs and availableJobsCount fields are accessed by both the main and worker threads and should + * only be mutated after locking the mutex */ + ZSTD_pthread_mutex_t ioJobsMutex; + void* availableJobs[MAX_IO_JOBS]; + int availableJobsCount; + size_t jobBufferSize; +} IOPoolCtx_t; + +typedef struct { + IOPoolCtx_t base; + + /* State regarding the currently read file */ + int reachedEof; + U64 nextReadOffset; + U64 waitingOnOffset; + + /* We may hold an IOJob object as needed if we actively expose its buffer. */ + void *currentJobHeld; + + /* Coalesce buffer is used to join two buffers in case where we need to read more bytes than left in + * the first of them. Shouldn't be accessed from outside ot utility functions. */ + U8 *coalesceBuffer; + + /* Read buffer can be used by consumer code, take care when copying this pointer aside as it might + * change when consuming / refilling buffer. */ + U8 *srcBuffer; + size_t srcBufferLoaded; + + /* We need to know what tasks completed so we can use their buffers when their time comes. + * Should only be accessed after locking base.ioJobsMutex . */ + void* completedJobs[MAX_IO_JOBS]; + int completedJobsCount; + ZSTD_pthread_cond_t jobCompletedCond; +} ReadPoolCtx_t; + +typedef struct { + IOPoolCtx_t base; + unsigned storedSkips; +} WritePoolCtx_t; + +typedef struct { + /* These fields are automatically set and shouldn't be changed by non WritePool code. */ + void *ctx; + FILE* file; + void *buffer; + size_t bufferSize; + + /* This field should be changed before a job is queued for execution and should contain the number + * of bytes to write from the buffer. */ + size_t usedBufferSize; + U64 offset; +} IOJob_t; + +/* AIO_supported: + * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */ +int AIO_supported(void); + + +/* AIO_WritePool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +void AIO_WritePool_releaseIoJob(IOJob_t *job); + +/* AIO_WritePool_acquireJob: + * Returns an available write job to be used for a future write. */ +IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t *ctx); + +/* AIO_WritePool_enqueueAndReacquireWriteJob: + * Enqueues a write job for execution and acquires a new one. + * After execution `job`'s pointed value would change to the newly acquired job. + * Make sure to set `usedBufferSize` to the wanted length before call. + * The queued job shouldn't be used directly after queueing it. */ +void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job); + +/* AIO_WritePool_sparseWriteEnd: + * Ends sparse writes to the current file. + * Blocks on completion of all current write jobs before executing. */ +void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t *ctx); + +/* AIO_WritePool_setFile: + * Sets the destination file for future writes in the pool. 
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs.
+ * Also requires ending of sparse write if a previous file was used in sparse mode. */
+void AIO_WritePool_setFile(WritePoolCtx_t *ctx, FILE* file);
+
+/* AIO_WritePool_getFile:
+ * Returns the file the writePool is currently set to write to. */
+FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx);
+
+/* AIO_WritePool_closeFile:
+ * Ends sparse writes, closes the writePool's current file, and sets the file to NULL.
+ * Requires completion of all queued write jobs and release of all otherwise acquired jobs. */
+int AIO_WritePool_closeFile(WritePoolCtx_t *ctx);
+
+/* AIO_WritePool_create:
+ * Allocates and sets up a new write pool, including its jobs.
+ * bufferSize should be set to the maximal buffer we want to write to at a time. */
+WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize);
+
+/* AIO_WritePool_free:
+ * Frees and releases a writePool and its resources. Closes destination file. */
+void AIO_WritePool_free(WritePoolCtx_t* ctx);
+
+/* AIO_WritePool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async);
+
+/* AIO_ReadPool_create:
+ * Allocates and sets up a new readPool, including its jobs.
+ * bufferSize should be set to the maximal buffer we want to read at a time; it will also be used
+ * as our basic read size. */
+ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize);
+
+/* AIO_ReadPool_free:
+ * Frees and releases a readPool and its resources. Closes source file. */
+void AIO_ReadPool_free(ReadPoolCtx_t* ctx);
+
+/* AIO_ReadPool_setAsync:
+ * Allows (de)activating async mode, to be used when the expected overhead
+ * of asyncio costs more than the expected gains. */
+void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async);
+
+/* AIO_ReadPool_consumeBytes:
+ * Consumes bytes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */
+void AIO_ReadPool_consumeBytes(ReadPoolCtx_t *ctx, size_t n);
+
+/* AIO_ReadPool_fillBuffer:
+ * Makes sure buffer has at least n bytes loaded (as long as n is not bigger than the initialized bufferSize).
+ * Returns once srcBuffer has at least n bytes loaded, or once we've reached the end of the file.
+ * Return value is the number of bytes added to the buffer.
+ * Note that srcBuffer might have up to 2 times bufferSize bytes. */
+size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t *ctx, size_t n);
+
+/* AIO_ReadPool_consumeAndRefill:
+ * Consumes the current buffer and refills it with bufferSize bytes. */
+size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t *ctx);
+
+/* AIO_ReadPool_setFile:
+ * Sets the source file for future reads in the pool. Initiates reading immediately if file is not NULL.
+ * Waits for all current enqueued tasks to complete if a previous file was set. */
+void AIO_ReadPool_setFile(ReadPoolCtx_t *ctx, FILE* file);
+
+/* AIO_ReadPool_getFile:
+ * Returns the current file set for the read pool. */
+FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t *ctx);
+
+/* AIO_ReadPool_closeFile:
+ * Closes the current set file. Waits for all current enqueued tasks to complete and resets state.
+ */
+int AIO_ReadPool_closeFile(ReadPoolCtx_t *ctx);
+
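+/* Usage sketch (editor's illustration, not part of the upstream sources; simplified, error
+ * handling omitted; `prefs`, `bufSize`, `srcFile`, `dstFile`, `bytesConsumed` and
+ * `bytesProduced` stand in for caller-provided values). A decompression-style consumer
+ * pairs a ReadPool with a WritePool roughly as follows:
+ *
+ *     ReadPoolCtx_t*  rp = AIO_ReadPool_create(prefs, bufSize);
+ *     WritePoolCtx_t* wp = AIO_WritePool_create(prefs, bufSize);
+ *     AIO_ReadPool_setFile(rp, srcFile);
+ *     AIO_WritePool_setFile(wp, dstFile);
+ *     {   IOJob_t* job = AIO_WritePool_acquireJob(wp);
+ *         for (;;) {
+ *             AIO_ReadPool_fillBuffer(rp, bufSize);
+ *             if (rp->srcBufferLoaded == 0) break;              // end of input
+ *             ... decode from rp->srcBuffer into job->buffer,
+ *                 yielding bytesConsumed and bytesProduced ...
+ *             AIO_ReadPool_consumeBytes(rp, bytesConsumed);
+ *             job->usedBufferSize = bytesProduced;
+ *             AIO_WritePool_enqueueAndReacquireWriteJob(&job);  // hands the buffer to the writer thread
+ *         }
+ *         AIO_WritePool_releaseIoJob(job);
+ *     }
+ *     AIO_ReadPool_closeFile(rp);
+ *     AIO_WritePool_closeFile(wp);
+ *     AIO_ReadPool_free(rp);
+ *     AIO_WritePool_free(wp);
+ */
+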
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_FILEIO_ASYNCIO_H */
diff --git a/sys/contrib/zstd/programs/fileio_asyncio.c b/sys/contrib/zstd/programs/fileio_asyncio.c
new file mode 100644
--- /dev/null
+++ b/sys/contrib/zstd/programs/fileio_asyncio.c
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "platform.h"
+#include <stdio.h>   /* fprintf, open, fdopen, fread, _fileno, stdin, stdout */
+#include <stdlib.h>  /* malloc, free */
+#include <assert.h>
+#include <errno.h>   /* errno */
+
+#if defined (_MSC_VER)
+#  include <sys/stat.h>
+#  include <io.h>
+#endif
+
+#include "fileio_asyncio.h"
+#include "fileio_common.h"
+
+/* **********************************************************************
+ *  Sparse write
+ ************************************************************************/
+
+/** AIO_fwriteSparse() :
+*  @return : storedSkips,
+*            argument for next call to AIO_fwriteSparse() or AIO_fwriteSparseEnd() */
+static unsigned
+AIO_fwriteSparse(FILE* file,
+                 const void* buffer, size_t bufferSize,
+                 const FIO_prefs_t* const prefs,
+                 unsigned storedSkips)
+{
+    const size_t* const bufferT = (const size_t*)buffer;   /* Buffer is supposed malloc'ed, hence aligned on size_t */
+    size_t bufferSizeT = bufferSize / sizeof(size_t);
+    const size_t* const bufferTEnd = bufferT + bufferSizeT;
+    const size_t* ptrT = bufferT;
+    static const size_t segmentSizeT = (32 KB) / sizeof(size_t);   /* check every 32 KB */
+
+    if (prefs->testMode) return 0;  /* do not output anything in test mode */
+
+    if (!prefs->sparseFileSupport) {  /* normal write */
+        size_t const sizeCheck = fwrite(buffer, 1, bufferSize, file);
+        if (sizeCheck != bufferSize)
+            EXM_THROW(70, "Write error : cannot write block : %s",
+                            strerror(errno));
+        return 0;
+    }
+
+    /* avoid int overflow */
+    if (storedSkips > 1 GB) {
+        if (LONG_SEEK(file, 1 GB, SEEK_CUR) != 0)
+            EXM_THROW(91, "1 GB skip error (sparse file support)");
+        storedSkips -= 1 GB;
+    }
+
+    while (ptrT < bufferTEnd) {
+        size_t nb0T;
+
+        /* adjust last segment if < 32 KB */
+        size_t seg0SizeT = segmentSizeT;
+        if (seg0SizeT > bufferSizeT) seg0SizeT = bufferSizeT;
+        bufferSizeT -= seg0SizeT;
+
+        /* count leading zeroes */
+        for (nb0T=0; (nb0T < seg0SizeT) && (ptrT[nb0T] == 0); nb0T++) ;
+        storedSkips += (unsigned)(nb0T * sizeof(size_t));
+
+        if (nb0T != seg0SizeT) {   /* not all 0s */
+            size_t const nbNon0ST = seg0SizeT - nb0T;
+            /* skip leading zeros */
+            if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0)
+                EXM_THROW(92, "Sparse skip error ; try --no-sparse");
+            storedSkips = 0;
+            /* write the rest */
+            if (fwrite(ptrT + nb0T, sizeof(size_t), nbNon0ST, file) != nbNon0ST)
+                EXM_THROW(93, "Write error : cannot write block : %s",
+                            strerror(errno));
+        }
+        ptrT += seg0SizeT;
+    }
+
+    {   static size_t const maskT = sizeof(size_t)-1;
+        if (bufferSize & maskT) {
+            /* size not multiple of sizeof(size_t) : implies end of block */
+            const char* const restStart = (const char*)bufferTEnd;
+            const char* restPtr = restStart;
+            const char* const restEnd = (const char*)buffer + bufferSize;
+            assert(restEnd > restStart && restEnd < restStart + sizeof(size_t));
+            for ( ; (restPtr < restEnd) && (*restPtr == 0); restPtr++) ;
+            storedSkips +=
(unsigned) (restPtr - restStart); + if (restPtr != restEnd) { + /* not all remaining bytes are 0 */ + size_t const restSize = (size_t)(restEnd - restPtr); + if (LONG_SEEK(file, storedSkips, SEEK_CUR) != 0) + EXM_THROW(92, "Sparse skip error ; try --no-sparse"); + if (fwrite(restPtr, 1, restSize, file) != restSize) + EXM_THROW(95, "Write error : cannot write end of decoded block : %s", + strerror(errno)); + storedSkips = 0; + } } } + + return storedSkips; +} + +static void +AIO_fwriteSparseEnd(const FIO_prefs_t* const prefs, FILE* file, unsigned storedSkips) +{ + if (prefs->testMode) assert(storedSkips == 0); + if (storedSkips>0) { + assert(prefs->sparseFileSupport > 0); /* storedSkips>0 implies sparse support is enabled */ + (void)prefs; /* assert can be disabled, in which case prefs becomes unused */ + if (LONG_SEEK(file, storedSkips-1, SEEK_CUR) != 0) + EXM_THROW(69, "Final skip error (sparse file support)"); + /* last zero must be explicitly written, + * so that skipped ones get implicitly translated as zero by FS */ + { const char lastZeroByte[1] = { 0 }; + if (fwrite(lastZeroByte, 1, 1, file) != 1) + EXM_THROW(69, "Write error : cannot write last zero : %s", strerror(errno)); + } } +} + + +/* ********************************************************************** + * AsyncIO functionality + ************************************************************************/ + +/* AIO_supported: + * Returns 1 if AsyncIO is supported on the system, 0 otherwise. */ +int AIO_supported(void) { +#ifdef ZSTD_MULTITHREAD + return 1; +#else + return 0; +#endif +} + +/* *********************************** + * Generic IoPool implementation + *************************************/ + +static IOJob_t *AIO_IOPool_createIoJob(IOPoolCtx_t *ctx, size_t bufferSize) { + IOJob_t* const job = (IOJob_t*) malloc(sizeof(IOJob_t)); + void* const buffer = malloc(bufferSize); + if(!job || !buffer) + EXM_THROW(101, "Allocation error : not enough memory"); + job->buffer = buffer; + job->bufferSize = bufferSize; + job->usedBufferSize = 0; + job->file = NULL; + job->ctx = ctx; + job->offset = 0; + return job; +} + + +/* AIO_IOPool_createThreadPool: + * Creates a thread pool and a mutex for threaded IO pool. + * Displays warning if asyncio is requested but MT isn't available. */ +static void AIO_IOPool_createThreadPool(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs) { + ctx->threadPool = NULL; + ctx->threadPoolActive = 0; + if(prefs->asyncIO) { + if (ZSTD_pthread_mutex_init(&ctx->ioJobsMutex, NULL)) + EXM_THROW(102,"Failed creating ioJobsMutex mutex"); + /* We want MAX_IO_JOBS-2 queue items because we need to always have 1 free buffer to + * decompress into and 1 buffer that's actively written to disk and owned by the writing thread. */ + assert(MAX_IO_JOBS >= 2); + ctx->threadPool = POOL_create(1, MAX_IO_JOBS - 2); + ctx->threadPoolActive = 1; + if (!ctx->threadPool) + EXM_THROW(104, "Failed creating I/O thread pool"); + } +} + +/* AIO_IOPool_init: + * Allocates and sets and a new I/O thread pool including its included availableJobs. */ +static void AIO_IOPool_init(IOPoolCtx_t* ctx, const FIO_prefs_t* prefs, POOL_function poolFunction, size_t bufferSize) { + int i; + AIO_IOPool_createThreadPool(ctx, prefs); + ctx->prefs = prefs; + ctx->poolFunction = poolFunction; + ctx->totalIoJobs = ctx->threadPool ? 
MAX_IO_JOBS : 2; + ctx->availableJobsCount = ctx->totalIoJobs; + for(i=0; i < ctx->availableJobsCount; i++) { + ctx->availableJobs[i] = AIO_IOPool_createIoJob(ctx, bufferSize); + } + ctx->jobBufferSize = bufferSize; + ctx->file = NULL; +} + + +/* AIO_IOPool_threadPoolActive: + * Check if current operation uses thread pool. + * Note that in some cases we have a thread pool initialized but choose not to use it. */ +static int AIO_IOPool_threadPoolActive(IOPoolCtx_t* ctx) { + return ctx->threadPool && ctx->threadPoolActive; +} + + +/* AIO_IOPool_lockJobsMutex: + * Locks the IO jobs mutex if threading is active */ +static void AIO_IOPool_lockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_lock(&ctx->ioJobsMutex); +} + +/* AIO_IOPool_unlockJobsMutex: + * Unlocks the IO jobs mutex if threading is active */ +static void AIO_IOPool_unlockJobsMutex(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + ZSTD_pthread_mutex_unlock(&ctx->ioJobsMutex); +} + +/* AIO_IOPool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +static void AIO_IOPool_releaseIoJob(IOJob_t* job) { + IOPoolCtx_t* const ctx = (IOPoolCtx_t *) job->ctx; + AIO_IOPool_lockJobsMutex(ctx); + assert(ctx->availableJobsCount < ctx->totalIoJobs); + ctx->availableJobs[ctx->availableJobsCount++] = job; + AIO_IOPool_unlockJobsMutex(ctx); +} + +/* AIO_IOPool_join: + * Waits for all tasks in the pool to finish executing. */ +static void AIO_IOPool_join(IOPoolCtx_t* ctx) { + if(AIO_IOPool_threadPoolActive(ctx)) + POOL_joinJobs(ctx->threadPool); +} + +/* AIO_IOPool_setThreaded: + * Allows (de)activating threaded mode, to be used when the expected overhead + * of threading costs more than the expected gains. */ +static void AIO_IOPool_setThreaded(IOPoolCtx_t* ctx, int threaded) { + assert(threaded == 0 || threaded == 1); + assert(ctx != NULL); + if(ctx->threadPoolActive != threaded) { + AIO_IOPool_join(ctx); + ctx->threadPoolActive = threaded; + } +} + +/* AIO_IOPool_free: + * Release a previously allocated IO thread pool. Makes sure all tasks are done and released. */ +static void AIO_IOPool_destroy(IOPoolCtx_t* ctx) { + int i; + if(ctx->threadPool) { + /* Make sure we finish all tasks and then free the resources */ + AIO_IOPool_join(ctx); + /* Make sure we are not leaking availableJobs */ + assert(ctx->availableJobsCount == ctx->totalIoJobs); + POOL_free(ctx->threadPool); + ZSTD_pthread_mutex_destroy(&ctx->ioJobsMutex); + } + assert(ctx->file == NULL); + for(i=0; iavailableJobsCount; i++) { + IOJob_t* job = (IOJob_t*) ctx->availableJobs[i]; + free(job->buffer); + free(job); + } +} + +/* AIO_IOPool_acquireJob: + * Returns an available io job to be used for a future io. */ +static IOJob_t* AIO_IOPool_acquireJob(IOPoolCtx_t* ctx) { + IOJob_t *job; + assert(ctx->file != NULL || ctx->prefs->testMode); + AIO_IOPool_lockJobsMutex(ctx); + assert(ctx->availableJobsCount > 0); + job = (IOJob_t*) ctx->availableJobs[--ctx->availableJobsCount]; + AIO_IOPool_unlockJobsMutex(ctx); + job->usedBufferSize = 0; + job->file = ctx->file; + job->offset = 0; + return job; +} + + +/* AIO_IOPool_setFile: + * Sets the destination file for future files in the pool. + * Requires completion of all queued jobs and release of all otherwise acquired jobs. 
*/ +static void AIO_IOPool_setFile(IOPoolCtx_t* ctx, FILE* file) { + assert(ctx!=NULL); + AIO_IOPool_join(ctx); + assert(ctx->availableJobsCount == ctx->totalIoJobs); + ctx->file = file; +} + +static FILE* AIO_IOPool_getFile(const IOPoolCtx_t* ctx) { + return ctx->file; +} + +/* AIO_IOPool_enqueueJob: + * Enqueues an io job for execution. + * The queued job shouldn't be used directly after queueing it. */ +static void AIO_IOPool_enqueueJob(IOJob_t* job) { + IOPoolCtx_t* const ctx = (IOPoolCtx_t *)job->ctx; + if(AIO_IOPool_threadPoolActive(ctx)) + POOL_add(ctx->threadPool, ctx->poolFunction, job); + else + ctx->poolFunction(job); +} + +/* *********************************** + * WritePool implementation + *************************************/ + +/* AIO_WritePool_acquireJob: + * Returns an available write job to be used for a future write. */ +IOJob_t* AIO_WritePool_acquireJob(WritePoolCtx_t* ctx) { + return AIO_IOPool_acquireJob(&ctx->base); +} + +/* AIO_WritePool_enqueueAndReacquireWriteJob: + * Queues a write job for execution and acquires a new one. + * After execution `job`'s pointed value would change to the newly acquired job. + * Make sure to set `usedBufferSize` to the wanted length before call. + * The queued job shouldn't be used directly after queueing it. */ +void AIO_WritePool_enqueueAndReacquireWriteJob(IOJob_t **job) { + AIO_IOPool_enqueueJob(*job); + *job = AIO_IOPool_acquireJob((IOPoolCtx_t *)(*job)->ctx); +} + +/* AIO_WritePool_sparseWriteEnd: + * Ends sparse writes to the current file. + * Blocks on completion of all current write jobs before executing. */ +void AIO_WritePool_sparseWriteEnd(WritePoolCtx_t* ctx) { + assert(ctx != NULL); + AIO_IOPool_join(&ctx->base); + AIO_fwriteSparseEnd(ctx->base.prefs, ctx->base.file, ctx->storedSkips); + ctx->storedSkips = 0; +} + +/* AIO_WritePool_setFile: + * Sets the destination file for future writes in the pool. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. + * Also requires ending of sparse write if a previous file was used in sparse mode. */ +void AIO_WritePool_setFile(WritePoolCtx_t* ctx, FILE* file) { + AIO_IOPool_setFile(&ctx->base, file); + assert(ctx->storedSkips == 0); +} + +/* AIO_WritePool_getFile: + * Returns the file the writePool is currently set to write to. */ +FILE* AIO_WritePool_getFile(const WritePoolCtx_t* ctx) { + return AIO_IOPool_getFile(&ctx->base); +} + +/* AIO_WritePool_releaseIoJob: + * Releases an acquired job back to the pool. Doesn't execute the job. */ +void AIO_WritePool_releaseIoJob(IOJob_t* job) { + AIO_IOPool_releaseIoJob(job); +} + +/* AIO_WritePool_closeFile: + * Ends sparse write and closes the writePool's current file and sets the file to NULL. + * Requires completion of all queues write jobs and release of all otherwise acquired jobs. */ +int AIO_WritePool_closeFile(WritePoolCtx_t* ctx) { + FILE* const dstFile = ctx->base.file; + assert(dstFile!=NULL || ctx->base.prefs->testMode!=0); + AIO_WritePool_sparseWriteEnd(ctx); + AIO_IOPool_setFile(&ctx->base, NULL); + return fclose(dstFile); +} + +/* AIO_WritePool_executeWriteJob: + * Executes a write job synchronously. Can be used as a function for a thread pool. 
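+ * Writes the job's buffer via AIO_fwriteSparse(), stores the updated skip count back into the
+ * pool context, then releases the job back to the pool.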
*/ +static void AIO_WritePool_executeWriteJob(void* opaque){ + IOJob_t* const job = (IOJob_t*) opaque; + WritePoolCtx_t* const ctx = (WritePoolCtx_t*) job->ctx; + ctx->storedSkips = AIO_fwriteSparse(job->file, job->buffer, job->usedBufferSize, ctx->base.prefs, ctx->storedSkips); + AIO_IOPool_releaseIoJob(job); +} + +/* AIO_WritePool_create: + * Allocates and sets and a new write pool including its included jobs. */ +WritePoolCtx_t* AIO_WritePool_create(const FIO_prefs_t* prefs, size_t bufferSize) { + WritePoolCtx_t* const ctx = (WritePoolCtx_t*) malloc(sizeof(WritePoolCtx_t)); + if(!ctx) EXM_THROW(100, "Allocation error : not enough memory"); + AIO_IOPool_init(&ctx->base, prefs, AIO_WritePool_executeWriteJob, bufferSize); + ctx->storedSkips = 0; + return ctx; +} + +/* AIO_WritePool_free: + * Frees and releases a writePool and its resources. Closes destination file if needs to. */ +void AIO_WritePool_free(WritePoolCtx_t* ctx) { + /* Make sure we finish all tasks and then free the resources */ + if(AIO_WritePool_getFile(ctx)) + AIO_WritePool_closeFile(ctx); + AIO_IOPool_destroy(&ctx->base); + assert(ctx->storedSkips==0); + free(ctx); +} + +/* AIO_WritePool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_WritePool_setAsync(WritePoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} + + +/* *********************************** + * ReadPool implementation + *************************************/ +static void AIO_ReadPool_releaseAllCompletedJobs(ReadPoolCtx_t* ctx) { + int i; + for(i=0; icompletedJobsCount; i++) { + IOJob_t* job = (IOJob_t*) ctx->completedJobs[i]; + AIO_IOPool_releaseIoJob(job); + } + ctx->completedJobsCount = 0; +} + +static void AIO_ReadPool_addJobToCompleted(IOJob_t* job) { + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx; + AIO_IOPool_lockJobsMutex(&ctx->base); + assert(ctx->completedJobsCount < MAX_IO_JOBS); + ctx->completedJobs[ctx->completedJobsCount++] = job; + if(AIO_IOPool_threadPoolActive(&ctx->base)) { + ZSTD_pthread_cond_signal(&ctx->jobCompletedCond); + } + AIO_IOPool_unlockJobsMutex(&ctx->base); +} + +/* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked: + * Looks through the completed jobs for a job matching the waitingOnOffset and returns it, + * if job wasn't found returns NULL. + * IMPORTANT: assumes ioJobsMutex is locked. */ +static IOJob_t* AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ReadPoolCtx_t* ctx) { + IOJob_t *job = NULL; + int i; + /* This implementation goes through all completed jobs and looks for the one matching the next offset. + * While not strictly needed for a single threaded reader implementation (as in such a case we could expect + * reads to be completed in order) this implementation was chosen as it better fits other asyncio + * interfaces (such as io_uring) that do not provide promises regarding order of completion. */ + for (i=0; icompletedJobsCount; i++) { + job = (IOJob_t *) ctx->completedJobs[i]; + if (job->offset == ctx->waitingOnOffset) { + ctx->completedJobs[i] = ctx->completedJobs[--ctx->completedJobsCount]; + return job; + } + } + return NULL; +} + +/* AIO_ReadPool_numReadsInFlight: + * Returns the number of IO read jobs currently in flight. */ +static size_t AIO_ReadPool_numReadsInFlight(ReadPoolCtx_t* ctx) { + const size_t jobsHeld = (ctx->currentJobHeld==NULL ? 
0 : 1); + return ctx->base.totalIoJobs - (ctx->base.availableJobsCount + ctx->completedJobsCount + jobsHeld); +} + +/* AIO_ReadPool_getNextCompletedJob: + * Returns a completed IOJob_t for the next read in line based on waitingOnOffset and advances waitingOnOffset. + * Would block. */ +static IOJob_t* AIO_ReadPool_getNextCompletedJob(ReadPoolCtx_t* ctx) { + IOJob_t *job = NULL; + AIO_IOPool_lockJobsMutex(&ctx->base); + + job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx); + + /* As long as we didn't find the job matching the next read, and we have some reads in flight continue waiting */ + while (!job && (AIO_ReadPool_numReadsInFlight(ctx) > 0)) { + assert(ctx->base.threadPool != NULL); /* we shouldn't be here if we work in sync mode */ + ZSTD_pthread_cond_wait(&ctx->jobCompletedCond, &ctx->base.ioJobsMutex); + job = AIO_ReadPool_findNextWaitingOffsetCompletedJob_locked(ctx); + } + + if(job) { + assert(job->offset == ctx->waitingOnOffset); + ctx->waitingOnOffset += job->usedBufferSize; + } + + AIO_IOPool_unlockJobsMutex(&ctx->base); + return job; +} + + +/* AIO_ReadPool_executeReadJob: + * Executes a read job synchronously. Can be used as a function for a thread pool. */ +static void AIO_ReadPool_executeReadJob(void* opaque){ + IOJob_t* const job = (IOJob_t*) opaque; + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t *)job->ctx; + if(ctx->reachedEof) { + job->usedBufferSize = 0; + AIO_ReadPool_addJobToCompleted(job); + return; + } + job->usedBufferSize = fread(job->buffer, 1, job->bufferSize, job->file); + if(job->usedBufferSize < job->bufferSize) { + if(ferror(job->file)) { + EXM_THROW(37, "Read error"); + } else if(feof(job->file)) { + ctx->reachedEof = 1; + } else { + EXM_THROW(37, "Unexpected short read"); + } + } + AIO_ReadPool_addJobToCompleted(job); +} + +static void AIO_ReadPool_enqueueRead(ReadPoolCtx_t* ctx) { + IOJob_t* const job = AIO_IOPool_acquireJob(&ctx->base); + job->offset = ctx->nextReadOffset; + ctx->nextReadOffset += job->bufferSize; + AIO_IOPool_enqueueJob(job); +} + +static void AIO_ReadPool_startReading(ReadPoolCtx_t* ctx) { + int i; + for (i = 0; i < ctx->base.availableJobsCount; i++) { + AIO_ReadPool_enqueueRead(ctx); + } +} + +/* AIO_ReadPool_setFile: + * Sets the source file for future read in the pool. Initiates reading immediately if file is not NULL. + * Waits for all current enqueued tasks to complete if a previous file was set. */ +void AIO_ReadPool_setFile(ReadPoolCtx_t* ctx, FILE* file) { + assert(ctx!=NULL); + AIO_IOPool_join(&ctx->base); + AIO_ReadPool_releaseAllCompletedJobs(ctx); + if (ctx->currentJobHeld) { + AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld); + ctx->currentJobHeld = NULL; + } + AIO_IOPool_setFile(&ctx->base, file); + ctx->nextReadOffset = 0; + ctx->waitingOnOffset = 0; + ctx->srcBuffer = ctx->coalesceBuffer; + ctx->srcBufferLoaded = 0; + ctx->reachedEof = 0; + if(file != NULL) + AIO_ReadPool_startReading(ctx); +} + +/* AIO_ReadPool_create: + * Allocates and sets and a new readPool including its included jobs. + * bufferSize should be set to the maximal buffer we want to read at a time, will also be used + * as our basic read size. 
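+ * Note: the coalesce buffer is allocated at twice bufferSize so that fillBuffer() can join a
+ * partially-consumed buffer with the next completed job's buffer, which is why srcBuffer may
+ * hold up to 2 times bufferSize bytes.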
*/ +ReadPoolCtx_t* AIO_ReadPool_create(const FIO_prefs_t* prefs, size_t bufferSize) { + ReadPoolCtx_t* const ctx = (ReadPoolCtx_t*) malloc(sizeof(ReadPoolCtx_t)); + if(!ctx) EXM_THROW(100, "Allocation error : not enough memory"); + AIO_IOPool_init(&ctx->base, prefs, AIO_ReadPool_executeReadJob, bufferSize); + + ctx->coalesceBuffer = (U8*) malloc(bufferSize * 2); + ctx->srcBuffer = ctx->coalesceBuffer; + ctx->srcBufferLoaded = 0; + ctx->completedJobsCount = 0; + ctx->currentJobHeld = NULL; + + if(ctx->base.threadPool) + if (ZSTD_pthread_cond_init(&ctx->jobCompletedCond, NULL)) + EXM_THROW(103,"Failed creating jobCompletedCond cond"); + + return ctx; +} + +/* AIO_ReadPool_free: + * Frees and releases a readPool and its resources. Closes source file. */ +void AIO_ReadPool_free(ReadPoolCtx_t* ctx) { + if(AIO_ReadPool_getFile(ctx)) + AIO_ReadPool_closeFile(ctx); + if(ctx->base.threadPool) + ZSTD_pthread_cond_destroy(&ctx->jobCompletedCond); + AIO_IOPool_destroy(&ctx->base); + free(ctx->coalesceBuffer); + free(ctx); +} + +/* AIO_ReadPool_consumeBytes: + * Consumes byes from srcBuffer's beginning and updates srcBufferLoaded accordingly. */ +void AIO_ReadPool_consumeBytes(ReadPoolCtx_t* ctx, size_t n) { + assert(n <= ctx->srcBufferLoaded); + ctx->srcBufferLoaded -= n; + ctx->srcBuffer += n; +} + +/* AIO_ReadPool_releaseCurrentlyHeldAndGetNext: + * Release the current held job and get the next one, returns NULL if no next job available. */ +static IOJob_t* AIO_ReadPool_releaseCurrentHeldAndGetNext(ReadPoolCtx_t* ctx) { + if (ctx->currentJobHeld) { + AIO_IOPool_releaseIoJob((IOJob_t *)ctx->currentJobHeld); + ctx->currentJobHeld = NULL; + AIO_ReadPool_enqueueRead(ctx); + } + ctx->currentJobHeld = AIO_ReadPool_getNextCompletedJob(ctx); + return (IOJob_t*) ctx->currentJobHeld; +} + +/* AIO_ReadPool_fillBuffer: + * Tries to fill the buffer with at least n or jobBufferSize bytes (whichever is smaller). + * Returns if srcBuffer has at least the expected number of bytes loaded or if we've reached the end of the file. + * Return value is the number of bytes added to the buffer. + * Note that srcBuffer might have up to 2 times jobBufferSize bytes. */ +size_t AIO_ReadPool_fillBuffer(ReadPoolCtx_t* ctx, size_t n) { + IOJob_t *job; + int useCoalesce = 0; + if(n > ctx->base.jobBufferSize) + n = ctx->base.jobBufferSize; + + /* We are good, don't read anything */ + if (ctx->srcBufferLoaded >= n) + return 0; + + /* We still have bytes loaded, but not enough to satisfy caller. We need to get the next job + * and coalesce the remaining bytes with the next job's buffer */ + if (ctx->srcBufferLoaded > 0) { + useCoalesce = 1; + memcpy(ctx->coalesceBuffer, ctx->srcBuffer, ctx->srcBufferLoaded); + ctx->srcBuffer = ctx->coalesceBuffer; + } + + /* Read the next chunk */ + job = AIO_ReadPool_releaseCurrentHeldAndGetNext(ctx); + if(!job) + return 0; + if(useCoalesce) { + assert(ctx->srcBufferLoaded + job->usedBufferSize <= 2*ctx->base.jobBufferSize); + memcpy(ctx->coalesceBuffer + ctx->srcBufferLoaded, job->buffer, job->usedBufferSize); + ctx->srcBufferLoaded += job->usedBufferSize; + } + else { + ctx->srcBuffer = (U8 *) job->buffer; + ctx->srcBufferLoaded = job->usedBufferSize; + } + return job->usedBufferSize; +} + +/* AIO_ReadPool_consumeAndRefill: + * Consumes the current buffer and refills it with bufferSize bytes. 
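+ * Equivalent to consuming all currently loaded bytes and then calling fillBuffer() for a full
+ * job buffer; after the call srcBufferLoaded may be 0 if the end of the file has been reached.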
*/ +size_t AIO_ReadPool_consumeAndRefill(ReadPoolCtx_t* ctx) { + AIO_ReadPool_consumeBytes(ctx, ctx->srcBufferLoaded); + return AIO_ReadPool_fillBuffer(ctx, ctx->base.jobBufferSize); +} + +/* AIO_ReadPool_getFile: + * Returns the current file set for the read pool. */ +FILE* AIO_ReadPool_getFile(const ReadPoolCtx_t* ctx) { + return AIO_IOPool_getFile(&ctx->base); +} + +/* AIO_ReadPool_closeFile: + * Closes the current set file. Waits for all current enqueued tasks to complete and resets state. */ +int AIO_ReadPool_closeFile(ReadPoolCtx_t* ctx) { + FILE* const file = AIO_ReadPool_getFile(ctx); + AIO_ReadPool_setFile(ctx, NULL); + return fclose(file); +} + +/* AIO_ReadPool_setAsync: + * Allows (de)activating async mode, to be used when the expected overhead + * of asyncio costs more than the expected gains. */ +void AIO_ReadPool_setAsync(ReadPoolCtx_t* ctx, int async) { + AIO_IOPool_setThreaded(&ctx->base, async); +} diff --git a/sys/contrib/zstd/programs/fileio_common.h b/sys/contrib/zstd/programs/fileio_common.h new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/programs/fileio_common.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_FILEIO_COMMON_H +#define ZSTD_FILEIO_COMMON_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../lib/common/mem.h" /* U32, U64 */ +#include "fileio_types.h" +#include "platform.h" +#include "timefn.h" /* UTIL_getTime, UTIL_clockSpanMicro */ + +/*-************************************* +* Macros +***************************************/ +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) +#undef MAX +#define MAX(a,b) ((a)>(b) ? (a) : (b)) + +extern FIO_display_prefs_t g_display_prefs; + +#define DISPLAY(...) fprintf(stderr, __VA_ARGS__) +#define DISPLAYOUT(...) fprintf(stdout, __VA_ARGS__) +#define DISPLAYLEVEL(l, ...) { if (g_display_prefs.displayLevel>=l) { DISPLAY(__VA_ARGS__); } } + +extern UTIL_time_t g_displayClock; + +#define REFRESH_RATE ((U64)(SEC_TO_MICRO / 6)) +#define READY_FOR_UPDATE() (UTIL_clockSpanMicro(g_displayClock) > REFRESH_RATE || g_display_prefs.displayLevel >= 4) +#define DELAY_NEXT_UPDATE() { g_displayClock = UTIL_getTime(); } +#define DISPLAYUPDATE(l, ...) { \ + if (g_display_prefs.displayLevel>=l && (g_display_prefs.progressSetting != FIO_ps_never)) { \ + if (READY_FOR_UPDATE()) { \ + DELAY_NEXT_UPDATE(); \ + DISPLAY(__VA_ARGS__); \ + if (g_display_prefs.displayLevel>=4) fflush(stderr); \ + } } } + +#define SHOULD_DISPLAY_SUMMARY() \ + (g_display_prefs.displayLevel >= 2 || g_display_prefs.progressSetting == FIO_ps_always) +#define SHOULD_DISPLAY_PROGRESS() \ + (g_display_prefs.progressSetting != FIO_ps_never && SHOULD_DISPLAY_SUMMARY()) +#define DISPLAY_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYLEVEL(1, __VA_ARGS__); }} +#define DISPLAYUPDATE_PROGRESS(...) { if (SHOULD_DISPLAY_PROGRESS()) { DISPLAYUPDATE(1, __VA_ARGS__); }} +#define DISPLAY_SUMMARY(...) { if (SHOULD_DISPLAY_SUMMARY()) { DISPLAYLEVEL(1, __VA_ARGS__); } } + +#undef MIN /* in case it would be already defined */ +#define MIN(a,b) ((a) < (b) ? (a) : (b)) + + +#define EXM_THROW(error, ...) 
\ +{ \ + DISPLAYLEVEL(1, "zstd: "); \ + DISPLAYLEVEL(5, "Error defined at %s, line %i : \n", __FILE__, __LINE__); \ + DISPLAYLEVEL(1, "error %i : ", error); \ + DISPLAYLEVEL(1, __VA_ARGS__); \ + DISPLAYLEVEL(1, " \n"); \ + exit(error); \ +} + +#define CHECK_V(v, f) \ + v = f; \ + if (ZSTD_isError(v)) { \ + DISPLAYLEVEL(5, "%s \n", #f); \ + EXM_THROW(11, "%s", ZSTD_getErrorName(v)); \ + } +#define CHECK(f) { size_t err; CHECK_V(err, f); } + + +/* Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW */ +#if defined(_MSC_VER) && _MSC_VER >= 1400 +# define LONG_SEEK _fseeki64 +# define LONG_TELL _ftelli64 +#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ +# define LONG_SEEK fseeko +# define LONG_TELL ftello +#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) +# define LONG_SEEK fseeko64 +# define LONG_TELL ftello64 +#elif defined(_WIN32) && !defined(__DJGPP__) +# include + static int LONG_SEEK(FILE* file, __int64 offset, int origin) { + LARGE_INTEGER off; + DWORD method; + off.QuadPart = offset; + if (origin == SEEK_END) + method = FILE_END; + else if (origin == SEEK_CUR) + method = FILE_CURRENT; + else + method = FILE_BEGIN; + + if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) + return 0; + else + return -1; + } + static __int64 LONG_TELL(FILE* file) { + LARGE_INTEGER off, newOff; + off.QuadPart = 0; + newOff.QuadPart = 0; + SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, &newOff, FILE_CURRENT); + return newOff.QuadPart; + } +#else +# define LONG_SEEK fseek +# define LONG_TELL ftell +#endif + +#if defined (__cplusplus) +} +#endif +#endif /* ZSTD_FILEIO_COMMON_H */ diff --git a/sys/contrib/zstd/programs/fileio_types.h b/sys/contrib/zstd/programs/fileio_types.h new file mode 100644 --- /dev/null +++ b/sys/contrib/zstd/programs/fileio_types.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef FILEIO_TYPES_HEADER +#define FILEIO_TYPES_HEADER + +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ +#include "../lib/zstd.h" /* ZSTD_* */ + +/*-************************************* +* Parameters: FIO_prefs_t +***************************************/ + +typedef struct FIO_display_prefs_s FIO_display_prefs_t; + +typedef enum { FIO_ps_auto, FIO_ps_never, FIO_ps_always } FIO_progressSetting_e; + +struct FIO_display_prefs_s { + int displayLevel; /* 0 : no display; 1: errors; 2: + result + interaction + warnings; 3: + progression; 4: + information */ + FIO_progressSetting_e progressSetting; +}; + + +typedef enum { FIO_zstdCompression, FIO_gzipCompression, FIO_xzCompression, FIO_lzmaCompression, FIO_lz4Compression } FIO_compressionType_t; + +typedef struct FIO_prefs_s { + + /* Algorithm preferences */ + FIO_compressionType_t compressionType; + int sparseFileSupport; /* 0: no sparse allowed; 1: auto (file yes, stdout no); 2: force sparse */ + int dictIDFlag; + int checksumFlag; + int blockSize; + int overlapLog; + int adaptiveMode; + int useRowMatchFinder; + int rsyncable; + int minAdaptLevel; + int maxAdaptLevel; + int ldmFlag; + int ldmHashLog; + int ldmMinMatch; + int ldmBucketSizeLog; + int ldmHashRateLog; + size_t streamSrcSize; + size_t targetCBlockSize; + int srcSizeHint; + int testMode; + ZSTD_paramSwitch_e literalCompressionMode; + + /* IO preferences */ + int removeSrcFile; + int overwrite; + int asyncIO; + + /* Computation resources preferences */ + unsigned memLimit; + int nbWorkers; + + int excludeCompressedFiles; + int patchFromMode; + int contentSize; + int allowBlockDevices; + int passThrough; + ZSTD_paramSwitch_e mmapDict; +} FIO_prefs_t; + +typedef enum {FIO_mallocDict, FIO_mmapDict} FIO_dictBufferType_t; + +typedef struct { + void* dictBuffer; + size_t dictBufferSize; + FIO_dictBufferType_t dictBufferType; +#if defined(_MSC_VER) || defined(_WIN32) + HANDLE dictHandle; +#endif +} FIO_Dict_t; + +#endif /* FILEIO_TYPES_HEADER */ diff --git a/sys/contrib/zstd/programs/platform.h b/sys/contrib/zstd/programs/platform.h --- a/sys/contrib/zstd/programs/platform.h +++ b/sys/contrib/zstd/programs/platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -33,7 +33,7 @@ /* ************************************** * Detect 64-bit OS -* http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros +* https://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros ****************************************/ #if defined __ia64 || defined _M_IA64 /* Intel Itanium */ \ || defined __powerpc64__ || defined __ppc64__ || defined __PPC64__ /* POWER 64-bit */ \ @@ -80,7 +80,7 @@ * note: it's better to use unistd.h's _POSIX_VERSION whenever possible */ # define PLATFORM_POSIX_VERSION 200112L -/* try to determine posix version through official unistd.h's _POSIX_VERSION (http://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). +/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html). * note : there is no simple way to know in advance if is present or not on target system, * Posix specification mandates its presence and its content, but target system must respect this spec. 
* It's necessary to _not_ #include whenever target OS is not unix-like @@ -127,6 +127,10 @@ /*-********************************************* * Detect if isatty() and fileno() are available +* +* Note: Use UTIL_isConsole() for the zstd CLI +* instead, as it allows faking is console for +* testing. ************************************************/ #if (defined(__linux__) && (PLATFORM_POSIX_VERSION > 1)) \ || (PLATFORM_POSIX_VERSION >= 200112L) \ @@ -192,13 +196,13 @@ #ifndef ZSTD_SETPRIORITY_SUPPORT - /* mandates presence of and support for setpriority() : http://man7.org/linux/man-pages/man2/setpriority.2.html */ + /* mandates presence of and support for setpriority() : https://man7.org/linux/man-pages/man2/setpriority.2.html */ # define ZSTD_SETPRIORITY_SUPPORT (PLATFORM_POSIX_VERSION >= 200112L) #endif #ifndef ZSTD_NANOSLEEP_SUPPORT - /* mandates support of nanosleep() within : http://man7.org/linux/man-pages/man2/nanosleep.2.html */ + /* mandates support of nanosleep() within : https://man7.org/linux/man-pages/man2/nanosleep.2.html */ # if (defined(__linux__) && (PLATFORM_POSIX_VERSION >= 199309L)) \ || (PLATFORM_POSIX_VERSION >= 200112L) # define ZSTD_NANOSLEEP_SUPPORT 1 diff --git a/sys/contrib/zstd/programs/timefn.h b/sys/contrib/zstd/programs/timefn.h --- a/sys/contrib/zstd/programs/timefn.h +++ b/sys/contrib/zstd/programs/timefn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -16,70 +16,51 @@ #endif -/*-**************************************** -* Dependencies -******************************************/ -#include /* clock_t, clock, CLOCKS_PER_SEC */ - - /*-**************************************** -* Local Types +* Types ******************************************/ #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) # if defined(_AIX) # include # else -# include /* intptr_t */ +# include /* uint64_t */ # endif typedef uint64_t PTime; /* Precise Time */ #else typedef unsigned long long PTime; /* does not support compilers without long long support */ #endif +/* UTIL_time_t contains a nanosecond time counter. + * The absolute value is not meaningful. + * It's only valid to compute the difference between 2 measurements. */ +typedef struct { PTime t; } UTIL_time_t; +#define UTIL_TIME_INITIALIZER { 0 } /*-**************************************** * Time functions ******************************************/ -#if defined(_WIN32) /* Windows */ - - #include /* LARGE_INTEGER */ - typedef LARGE_INTEGER UTIL_time_t; - #define UTIL_TIME_INITIALIZER { { 0, 0 } } - -#elif defined(__APPLE__) && defined(__MACH__) - - #include - typedef PTime UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance. - Android also lacks it but does define TIME_UTC. */ -#elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \ - && defined(TIME_UTC) && !defined(__ANDROID__) - - typedef struct timespec UTIL_time_t; - #define UTIL_TIME_INITIALIZER { 0, 0 } - -#else /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */ - typedef clock_t UTIL_time_t; - #define UTIL_TIME_INITIALIZER 0 - -#endif +UTIL_time_t UTIL_getTime(void); +/* Timer resolution can be low on some platforms. 
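+ * (For instance, this is the case when only the C90 clock() fallback in timefn.c is available.)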
+ * To improve accuracy, it's recommended to wait for a new tick + * before starting benchmark measurements */ +void UTIL_waitForNextTick(void); +/* tells if timefn will return correct time measurements + * in presence of multi-threaded workload. + * note : this is not the case if only C90 clock_t measurements are available */ +int UTIL_support_MT_measurements(void); -UTIL_time_t UTIL_getTime(void); -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd); +PTime UTIL_clockSpanNano(UTIL_time_t clockStart); -#define SEC_TO_MICRO ((PTime)1000000) +PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd); PTime UTIL_clockSpanMicro(UTIL_time_t clockStart); -PTime UTIL_clockSpanNano(UTIL_time_t clockStart); -void UTIL_waitForNextTick(void); +#define SEC_TO_MICRO ((PTime)1000000) /* nb of microseconds in a second */ #if defined (__cplusplus) diff --git a/sys/contrib/zstd/programs/timefn.c b/sys/contrib/zstd/programs/timefn.c --- a/sys/contrib/zstd/programs/timefn.c +++ b/sys/contrib/zstd/programs/timefn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -12,7 +12,8 @@ /* === Dependencies === */ #include "timefn.h" - +#include "platform.h" /* set _POSIX_C_SOURCE */ +#include /* CLOCK_MONOTONIC, TIME_UTC */ /*-**************************************** * Time functions @@ -20,12 +21,11 @@ #if defined(_WIN32) /* Windows */ +#include /* LARGE_INTEGER */ #include /* abort */ #include /* perror */ -UTIL_time_t UTIL_getTime(void) { UTIL_time_t x; QueryPerformanceCounter(&x); return x; } - -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static LARGE_INTEGER ticksPerSecond; static int init = 0; @@ -36,30 +36,20 @@ } init = 1; } - return 1000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; -} - -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) -{ - static LARGE_INTEGER ticksPerSecond; - static int init = 0; - if (!init) { - if (!QueryPerformanceFrequency(&ticksPerSecond)) { - perror("timefn::QueryPerformanceFrequency"); - abort(); - } - init = 1; + { UTIL_time_t r; + LARGE_INTEGER x; + QueryPerformanceCounter(&x); + r.t = (PTime)(x.QuadPart * 1000000000ULL / ticksPerSecond.QuadPart); + return r; } - return 1000000000ULL*(clockEnd.QuadPart - clockStart.QuadPart)/ticksPerSecond.QuadPart; } - #elif defined(__APPLE__) && defined(__MACH__) -UTIL_time_t UTIL_getTime(void) { return mach_absolute_time(); } +#include /* mach_timebase_info_data_t, mach_timebase_info, mach_absolute_time */ -PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) +UTIL_time_t UTIL_getTime(void) { static mach_timebase_info_data_t rate; static int init = 0; @@ -67,23 +57,39 @@ mach_timebase_info(&rate); init = 1; } - return (((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom))/1000ULL; + { UTIL_time_t r; + r.t = mach_absolute_time() * (PTime)rate.numer / (PTime)rate.denom; + return r; + } } -PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) +/* POSIX.1-2001 (optional) */ +#elif defined(CLOCK_MONOTONIC) + +#include /* abort */ +#include /* perror */ + +UTIL_time_t UTIL_getTime(void) { - static mach_timebase_info_data_t rate; - static int init = 0; - if (!init) { - mach_timebase_info(&rate); - 
+    /* time must be initialized, otherwise it may fail msan test.
+     * No good reason, likely a limitation of timespec_get() for some target */
+    struct timespec time = { 0, 0 };
+    if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
+        perror("timefn::clock_gettime(CLOCK_MONOTONIC)");
+        abort();
+    }
+    { UTIL_time_t r;
+      r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec;
+      return r;
    }
-    return ((clockEnd - clockStart) * (PTime)rate.numer) / ((PTime)rate.denom);
 }
-/* C11 requires timespec_get, but FreeBSD 11 lacks it, while still claiming C11 compliance.
-   Android also lacks it but does define TIME_UTC. */
+/* C11 requires support of timespec_get().
+ * However, FreeBSD 11 claims C11 compliance while lacking timespec_get().
+ * Double confirm timespec_get() support by checking the definition of TIME_UTC.
+ * However, some versions of Android manage to simultaneously define TIME_UTC
+ * and lack timespec_get() support... */
 #elif (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */) \
     && defined(TIME_UTC) && !defined(__ANDROID__)
@@ -94,65 +100,49 @@
 {
     /* time must be initialized, otherwise it may fail msan test.
      * No good reason, likely a limitation of timespec_get() for some target */
-    UTIL_time_t time = UTIL_TIME_INITIALIZER;
+    struct timespec time = { 0, 0 };
     if (timespec_get(&time, TIME_UTC) != TIME_UTC) {
-        perror("timefn::timespec_get");
+        perror("timefn::timespec_get(TIME_UTC)");
         abort();
     }
-    return time;
-}
-
-static UTIL_time_t UTIL_getSpanTime(UTIL_time_t begin, UTIL_time_t end)
-{
-    UTIL_time_t diff;
-    if (end.tv_nsec < begin.tv_nsec) {
-        diff.tv_sec = (end.tv_sec - 1) - begin.tv_sec;
-        diff.tv_nsec = (end.tv_nsec + 1000000000ULL) - begin.tv_nsec;
-    } else {
-        diff.tv_sec = end.tv_sec - begin.tv_sec;
-        diff.tv_nsec = end.tv_nsec - begin.tv_nsec;
+    { UTIL_time_t r;
+      r.t = (PTime)time.tv_sec * 1000000000ULL + (PTime)time.tv_nsec;
+      return r;
     }
-    return diff;
 }
-PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
-{
-    UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
-    PTime micro = 0;
-    micro += 1000000ULL * diff.tv_sec;
-    micro += diff.tv_nsec / 1000ULL;
-    return micro;
-}
-PTime UTIL_getSpanTimeNano(UTIL_time_t begin, UTIL_time_t end)
+#else   /* relies on standard C90 (note : clock_t produces wrong measurements for multi-threaded workloads) */
+
+UTIL_time_t UTIL_getTime(void)
 {
-    UTIL_time_t const diff = UTIL_getSpanTime(begin, end);
-    PTime nano = 0;
-    nano += 1000000000ULL * diff.tv_sec;
-    nano += diff.tv_nsec;
-    return nano;
+    UTIL_time_t r;
+    r.t = (PTime)clock() * 1000000000ULL / CLOCKS_PER_SEC;
+    return r;
 }
-
-
-#else   /* relies on standard C90 (note : clock_t measurements can be wrong when using multi-threading) */
-
-UTIL_time_t UTIL_getTime(void) { return clock(); }
-PTime UTIL_getSpanTimeMicro(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
-PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd) { return 1000000000ULL * (clockEnd - clockStart) / CLOCKS_PER_SEC; }
+#define TIME_MT_MEASUREMENTS_NOT_SUPPORTED
 #endif
+/* ==== Common functions, valid for all time API ==== */
+PTime UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
+{
+    return clockEnd.t - clockStart.t;
+}
+
+PTime UTIL_getSpanTimeMicro(UTIL_time_t begin, UTIL_time_t end)
+{
+    return UTIL_getSpanTimeNano(begin, end) / 1000ULL;
+}
-/* returns time span in microseconds */
 PTime UTIL_clockSpanMicro(UTIL_time_t clockStart )
 {
     UTIL_time_t const clockEnd =
UTIL_getTime(); return UTIL_getSpanTimeMicro(clockStart, clockEnd); } -/* returns time span in microseconds */ PTime UTIL_clockSpanNano(UTIL_time_t clockStart ) { UTIL_time_t const clockEnd = UTIL_getTime(); @@ -167,3 +157,12 @@ clockEnd = UTIL_getTime(); } while (UTIL_getSpanTimeNano(clockStart, clockEnd) == 0); } + +int UTIL_support_MT_measurements(void) +{ +# if defined(TIME_MT_MEASUREMENTS_NOT_SUPPORTED) + return 0; +# else + return 1; +# endif +} diff --git a/sys/contrib/zstd/programs/util.h b/sys/contrib/zstd/programs/util.h --- a/sys/contrib/zstd/programs/util.h +++ b/sys/contrib/zstd/programs/util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -126,15 +126,25 @@ /** * Calls platform's equivalent of stat() on filename and writes info to statbuf. * Returns success (1) or failure (0). + * + * UTIL_fstat() is like UTIL_stat() but takes an optional fd that refers to the + * file in question. It turns out that this can be meaningfully faster. If fd is + * -1, behaves just like UTIL_stat() (i.e., falls back to using the filename). */ int UTIL_stat(const char* filename, stat_t* statbuf); +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf); /** * Instead of getting a file's stats, this updates them with the info in the * provided stat_t. Currently sets owner, group, atime, and mtime. Will only * update this info for regular files. + * + * UTIL_setFDStat() also takes an fd, and will preferentially use that to + * indicate which file to modify, If fd is -1, it will fall back to using the + * filename. */ int UTIL_setFileStat(const char* filename, const stat_t* statbuf); +int UTIL_setFDStat(const int fd, const char* filename, const stat_t* statbuf); /** * Set atime to now and mtime to the st_mtim in statbuf. @@ -159,8 +169,11 @@ * Like chmod(), but only modifies regular files. Provided statbuf may be NULL, * in which case this function will stat() the file internally, in order to * check whether it should be modified. + * + * If fd is -1, fd is ignored and the filename is used. */ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions); +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions); /* * In the absence of a pre-existing stat result on the file in question, these @@ -171,10 +184,30 @@ int UTIL_isRegularFile(const char* infilename); int UTIL_isDirectory(const char* infilename); int UTIL_isSameFile(const char* file1, const char* file2); +int UTIL_isSameFileStat(const char* file1, const char* file2, const stat_t* file1Stat, const stat_t* file2Stat); int UTIL_isCompressedFile(const char* infilename, const char *extensionList[]); int UTIL_isLink(const char* infilename); int UTIL_isFIFO(const char* infilename); +/** + * Returns with the given file descriptor is a console. + * Allows faking whether stdin/stdout/stderr is a console + * using UTIL_fake*IsConsole(). + */ +int UTIL_isConsole(FILE* file); + +/** + * Pretends that stdin/stdout/stderr is a console for testing. + */ +void UTIL_fakeStdinIsConsole(void); +void UTIL_fakeStdoutIsConsole(void); +void UTIL_fakeStderrIsConsole(void); + +/** + * Emit traces for functions that read, or modify file metadata. 
+ */ +void UTIL_traceFileStat(void); + #define UTIL_FILESIZE_UNKNOWN ((U64)(-1)) U64 UTIL_getFileSize(const char* infilename); U64 UTIL_getTotalFileSize(const char* const * fileNamesTable, unsigned nbFiles); @@ -248,7 +281,6 @@ /*! UTIL_expandFNT() : * read names from @fnt, and expand those corresponding to directories * update @fnt, now containing only file names, - * @return : 0 in case of success, 1 if error * note : in case of error, @fnt[0] is NULL */ void UTIL_expandFNT(FileNamesTable** fnt, int followLinks); @@ -269,6 +301,11 @@ */ FileNamesTable* UTIL_allocateFileNamesTable(size_t tableSize); +/*! UTIL_searchFileNamesTable() : + * Searched through entries in FileNamesTable for a specific name. + * @return : index of entry if found or -1 if not found + */ +int UTIL_searchFileNamesTable(FileNamesTable* table, char const* name); /*! UTIL_refFilename() : * Add a reference to read-only name into @fnt table. diff --git a/sys/contrib/zstd/programs/util.c b/sys/contrib/zstd/programs/util.c --- a/sys/contrib/zstd/programs/util.c +++ b/sys/contrib/zstd/programs/util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -66,6 +66,27 @@ #define UTIL_DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } } +static int g_traceDepth = 0; +int g_traceFileStat = 0; + +#define UTIL_TRACE_CALL(...) \ + { \ + if (g_traceFileStat) { \ + UTIL_DISPLAY("Trace:FileStat: %*s> ", g_traceDepth, ""); \ + UTIL_DISPLAY(__VA_ARGS__); \ + UTIL_DISPLAY("\n"); \ + ++g_traceDepth; \ + } \ + } + +#define UTIL_TRACE_RET(ret) \ + { \ + if (g_traceFileStat) { \ + --g_traceDepth; \ + UTIL_DISPLAY("Trace:FileStat: %*s< %d\n", g_traceDepth, "", (ret)); \ + } \ + } + /* A modified version of realloc(). * If UTIL_realloc() fails the original block is freed. 
*/ @@ -81,6 +102,17 @@ #define chmod _chmod #endif +#ifndef ZSTD_HAVE_FCHMOD +#if PLATFORM_POSIX_VERSION >= 199309L +#define ZSTD_HAVE_FCHMOD +#endif +#endif + +#ifndef ZSTD_HAVE_FCHOWN +#if PLATFORM_POSIX_VERSION >= 200809L +#define ZSTD_HAVE_FCHOWN +#endif +#endif /*-**************************************** * Console log @@ -100,7 +132,7 @@ ch = getchar(); result = 0; if (strchr(acceptableLetters, ch) == NULL) { - UTIL_DISPLAY("%s", abortMsg); + UTIL_DISPLAY("%s \n", abortMsg); result = 1; } /* flush the rest */ @@ -121,21 +153,51 @@ * Functions ***************************************/ -int UTIL_stat(const char* filename, stat_t* statbuf) +void UTIL_traceFileStat(void) +{ + g_traceFileStat = 1; +} + +int UTIL_fstat(const int fd, const char* filename, stat_t* statbuf) { + int ret; + UTIL_TRACE_CALL("UTIL_stat(%d, %s)", fd, filename); #if defined(_MSC_VER) - return !_stat64(filename, statbuf); + if (fd >= 0) { + ret = !_fstat64(fd, statbuf); + } else { + ret = !_stat64(filename, statbuf); + } #elif defined(__MINGW32__) && defined (__MSVCRT__) - return !_stati64(filename, statbuf); + if (fd >= 0) { + ret = !_fstati64(fd, statbuf); + } else { + ret = !_stati64(filename, statbuf); + } #else - return !stat(filename, statbuf); + if (fd >= 0) { + ret = !fstat(fd, statbuf); + } else { + ret = !stat(filename, statbuf); + } #endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_stat(const char* filename, stat_t* statbuf) +{ + return UTIL_fstat(-1, filename, statbuf); } int UTIL_isRegularFile(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isRegularFile(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isRegularFileStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isRegularFileStat(const stat_t* statbuf) @@ -149,73 +211,148 @@ /* like chmod, but avoid changing permission of /dev/null */ int UTIL_chmod(char const* filename, const stat_t* statbuf, mode_t permissions) +{ + return UTIL_fchmod(-1, filename, statbuf, permissions); +} + +int UTIL_fchmod(const int fd, char const* filename, const stat_t* statbuf, mode_t permissions) { stat_t localStatBuf; + UTIL_TRACE_CALL("UTIL_chmod(%s, %#4o)", filename, (unsigned)permissions); if (statbuf == NULL) { - if (!UTIL_stat(filename, &localStatBuf)) return 0; + if (!UTIL_fstat(fd, filename, &localStatBuf)) { + UTIL_TRACE_RET(0); + return 0; + } statbuf = &localStatBuf; } - if (!UTIL_isRegularFileStat(statbuf)) return 0; /* pretend success, but don't change anything */ - return chmod(filename, permissions); + if (!UTIL_isRegularFileStat(statbuf)) { + UTIL_TRACE_RET(0); + return 0; /* pretend success, but don't change anything */ + } +#ifdef ZSTD_HAVE_FCHMOD + if (fd >= 0) { + int ret; + UTIL_TRACE_CALL("fchmod"); + ret = fchmod(fd, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } else +#endif + { + int ret; + UTIL_TRACE_CALL("chmod"); + ret = chmod(filename, permissions); + UTIL_TRACE_RET(ret); + UTIL_TRACE_RET(ret); + return ret; + } } /* set access and modification times */ int UTIL_utime(const char* filename, const stat_t *statbuf) { int ret; + UTIL_TRACE_CALL("UTIL_utime(%s)", filename); /* We check that st_mtime is a macro here in order to give us confidence * that struct stat has a struct timespec st_mtim member. We need this * check because there are some platforms that claim to be POSIX 2008 * compliant but which do not have st_mtim... 
*/ #if (PLATFORM_POSIX_VERSION >= 200809L) && defined(st_mtime) - /* (atime, mtime) */ - struct timespec timebuf[2] = { {0, UTIME_NOW} }; - timebuf[1] = statbuf->st_mtim; - ret = utimensat(AT_FDCWD, filename, timebuf, 0); + { + /* (atime, mtime) */ + struct timespec timebuf[2] = { {0, UTIME_NOW} }; + timebuf[1] = statbuf->st_mtim; + ret = utimensat(AT_FDCWD, filename, timebuf, 0); + } #else - struct utimbuf timebuf; - timebuf.actime = time(NULL); - timebuf.modtime = statbuf->st_mtime; - ret = utime(filename, &timebuf); + { + struct utimbuf timebuf; + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + ret = utime(filename, &timebuf); + } #endif errno = 0; + UTIL_TRACE_RET(ret); return ret; } int UTIL_setFileStat(const char *filename, const stat_t *statbuf) { - int res = 0; + return UTIL_setFDStat(-1, filename, statbuf); +} +int UTIL_setFDStat(const int fd, const char *filename, const stat_t *statbuf) +{ + int res = 0; stat_t curStatBuf; - if (!UTIL_stat(filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) + UTIL_TRACE_CALL("UTIL_setFileStat(%d, %s)", fd, filename); + + if (!UTIL_fstat(fd, filename, &curStatBuf) || !UTIL_isRegularFileStat(&curStatBuf)) { + UTIL_TRACE_RET(-1); return -1; + } - /* set access and modification times */ - res += UTIL_utime(filename, statbuf); + /* Mimic gzip's behavior: + * + * "Change the group first, then the permissions, then the owner. + * That way, the permissions will be correct on systems that allow + * users to give away files, without introducing a security hole. + * Security depends on permissions not containing the setuid or + * setgid bits." */ #if !defined(_WIN32) - res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, -1, statbuf->st_gid); /* Apply group ownership */ + } else +#endif + { + res += chown(filename, -1, statbuf->st_gid); /* Apply group ownership */ + } #endif - res += UTIL_chmod(filename, &curStatBuf, statbuf->st_mode & 07777); /* Copy file permissions */ + res += UTIL_fchmod(fd, filename, &curStatBuf, statbuf->st_mode & 0777); /* Copy file permissions */ + +#if !defined(_WIN32) +#ifdef ZSTD_HAVE_FCHOWN + if (fd >= 0) { + res += fchown(fd, statbuf->st_uid, -1); /* Apply user ownership */ + } else +#endif + { + res += chown(filename, statbuf->st_uid, -1); /* Apply user ownership */ + } +#endif errno = 0; + UTIL_TRACE_RET(-res); return -res; /* number of errors is returned */ } int UTIL_isDirectory(const char* infilename) { stat_t statbuf; - return UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + int ret; + UTIL_TRACE_CALL("UTIL_isDirectory(%s)", infilename); + ret = UTIL_stat(infilename, &statbuf) && UTIL_isDirectoryStat(&statbuf); + UTIL_TRACE_RET(ret); + return ret; } int UTIL_isDirectoryStat(const stat_t* statbuf) { + int ret; + UTIL_TRACE_CALL("UTIL_isDirectoryStat()"); #if defined(_MSC_VER) - return (statbuf->st_mode & _S_IFDIR) != 0; + ret = (statbuf->st_mode & _S_IFDIR) != 0; #else - return S_ISDIR(statbuf->st_mode) != 0; + ret = S_ISDIR(statbuf->st_mode) != 0; #endif + UTIL_TRACE_RET(ret); + return ret; } int UTIL_compareStr(const void *p1, const void *p2) { @@ -224,33 +361,68 @@ int UTIL_isSameFile(const char* fName1, const char* fName2) { + int ret; assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFile(%s, %s)", fName1, fName2); #if defined(_MSC_VER) || defined(_WIN32) /* note : Visual does not support file identification by inode. 
* inode does not work on Windows, even with a posix layer, like msys2. * The following work-around is limited to detecting exact name repetition only, * aka `filename` is considered different from `subdir/../filename` */ - return !strcmp(fName1, fName2); + ret = !strcmp(fName1, fName2); #else { stat_t file1Stat; stat_t file2Stat; - return UTIL_stat(fName1, &file1Stat) + ret = UTIL_stat(fName1, &file1Stat) && UTIL_stat(fName2, &file2Stat) - && (file1Stat.st_dev == file2Stat.st_dev) - && (file1Stat.st_ino == file2Stat.st_ino); + && UTIL_isSameFileStat(fName1, fName2, &file1Stat, &file2Stat); } #endif + UTIL_TRACE_RET(ret); + return ret; +} + +int UTIL_isSameFileStat( + const char* fName1, const char* fName2, + const stat_t* file1Stat, const stat_t* file2Stat) +{ + int ret; + assert(fName1 != NULL); assert(fName2 != NULL); + UTIL_TRACE_CALL("UTIL_isSameFileStat(%s, %s)", fName1, fName2); +#if defined(_MSC_VER) || defined(_WIN32) + /* note : Visual does not support file identification by inode. + * inode does not work on Windows, even with a posix layer, like msys2. + * The following work-around is limited to detecting exact name repetition only, + * aka `filename` is considered different from `subdir/../filename` */ + (void)file1Stat; + (void)file2Stat; + ret = !strcmp(fName1, fName2); +#else + { + ret = (file1Stat->st_dev == file2Stat->st_dev) + && (file1Stat->st_ino == file2Stat->st_ino); + } +#endif + UTIL_TRACE_RET(ret); + return ret; } /* UTIL_isFIFO : distinguish named pipes */ int UTIL_isFIFO(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isFIFO(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) return 1; + { + stat_t statbuf; + if (UTIL_stat(infilename, &statbuf) && UTIL_isFIFOStat(&statbuf)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } @@ -278,21 +450,69 @@ int UTIL_isLink(const char* infilename) { + UTIL_TRACE_CALL("UTIL_isLink(%s)", infilename); /* macro guards, as defined in : https://linux.die.net/man/2/lstat */ #if PLATFORM_POSIX_VERSION >= 200112L - stat_t statbuf; - int const r = lstat(infilename, &statbuf); - if (!r && S_ISLNK(statbuf.st_mode)) return 1; + { + stat_t statbuf; + int const r = lstat(infilename, &statbuf); + if (!r && S_ISLNK(statbuf.st_mode)) { + UTIL_TRACE_RET(1); + return 1; + } + } #endif (void)infilename; + UTIL_TRACE_RET(0); return 0; } +static int g_fakeStdinIsConsole = 0; +static int g_fakeStderrIsConsole = 0; +static int g_fakeStdoutIsConsole = 0; + +int UTIL_isConsole(FILE* file) +{ + int ret; + UTIL_TRACE_CALL("UTIL_isConsole(%d)", fileno(file)); + if (file == stdin && g_fakeStdinIsConsole) + ret = 1; + else if (file == stderr && g_fakeStderrIsConsole) + ret = 1; + else if (file == stdout && g_fakeStdoutIsConsole) + ret = 1; + else + ret = IS_CONSOLE(file); + UTIL_TRACE_RET(ret); + return ret; +} + +void UTIL_fakeStdinIsConsole(void) +{ + g_fakeStdinIsConsole = 1; +} +void UTIL_fakeStdoutIsConsole(void) +{ + g_fakeStdoutIsConsole = 1; +} +void UTIL_fakeStderrIsConsole(void) +{ + g_fakeStderrIsConsole = 1; +} + U64 UTIL_getFileSize(const char* infilename) { stat_t statbuf; - if (!UTIL_stat(infilename, &statbuf)) return UTIL_FILESIZE_UNKNOWN; - return UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_CALL("UTIL_getFileSize(%s)", infilename); + if (!UTIL_stat(infilename, &statbuf)) { + UTIL_TRACE_RET(-1); + return UTIL_FILESIZE_UNKNOWN; + } + 
{ + U64 const size = UTIL_getFileSizeStat(&statbuf); + UTIL_TRACE_RET((int)size); + return size; + } } U64 UTIL_getFileSizeStat(const stat_t* statbuf) @@ -369,11 +589,16 @@ { U64 total = 0; unsigned n; + UTIL_TRACE_CALL("UTIL_getTotalFileSize(%u)", nbFiles); for (n=0; ntableSize; i++) { + if(!strcmp(table->fileNames[i], name)) { + return (int)i; + } + } + return -1; +} + void UTIL_refFilename(FileNamesTable* fnt, const char* filename) { assert(fnt->tableSize < fnt->tableCapacity); @@ -559,7 +794,7 @@ for( idx2=0 ; (idx2 < table2->tableSize) && table2->fileNames[idx2] && (pos < newTotalTableSize) ; ++idx2, ++newTableIdx) { size_t const curLen = strlen(table2->fileNames[idx2]); memcpy(buf+pos, table2->fileNames[idx2], curLen); - assert(newTableIdx <= newTable->tableSize); + assert(newTableIdx < newTable->tableSize); newTable->fileNames[newTableIdx] = buf+pos; pos += curLen+1; } } @@ -683,8 +918,11 @@ ptrdiff_t newListSize = (*bufEnd - *bufStart) + LIST_SIZE_INCREASE; assert(newListSize >= 0); *bufStart = (char*)UTIL_realloc(*bufStart, (size_t)newListSize); - *bufEnd = *bufStart + newListSize; - if (*bufStart == NULL) { free(path); closedir(dir); return 0; } + if (*bufStart != NULL) { + *bufEnd = *bufStart + newListSize; + } else { + free(path); closedir(dir); return 0; + } } if (*bufStart + *pos + pathLength < *bufEnd) { memcpy(*bufStart + *pos, path, pathLength + 1); /* with final \0 */ @@ -870,30 +1108,30 @@ static char* mallocAndJoin2Dir(const char *dir1, const char *dir2) { - const size_t dir1Size = strlen(dir1); - const size_t dir2Size = strlen(dir2); - char *outDirBuffer, *buffer, trailingChar; - assert(dir1 != NULL && dir2 != NULL); - outDirBuffer = (char *) malloc(dir1Size + dir2Size + 2); - CONTROL(outDirBuffer != NULL); + { const size_t dir1Size = strlen(dir1); + const size_t dir2Size = strlen(dir2); + char *outDirBuffer, *buffer; - memcpy(outDirBuffer, dir1, dir1Size); - outDirBuffer[dir1Size] = '\0'; + outDirBuffer = (char *) malloc(dir1Size + dir2Size + 2); + CONTROL(outDirBuffer != NULL); - if (dir2[0] == '.') - return outDirBuffer; + memcpy(outDirBuffer, dir1, dir1Size); + outDirBuffer[dir1Size] = '\0'; - buffer = outDirBuffer + dir1Size; - trailingChar = *(buffer - 1); - if (trailingChar != PATH_SEP) { - *buffer = PATH_SEP; - buffer++; - } - memcpy(buffer, dir2, dir2Size); - buffer[dir2Size] = '\0'; + if (dir2[0] == '.') + return outDirBuffer; - return outDirBuffer; + buffer = outDirBuffer + dir1Size; + if (dir1Size > 0 && *(buffer - 1) != PATH_SEP) { + *buffer = PATH_SEP; + buffer++; + } + memcpy(buffer, dir2, dir2Size); + buffer[dir2Size] = '\0'; + + return outDirBuffer; + } } /* this function will return NULL if input srcFileName is not valid name for mirrored output path */ @@ -999,7 +1237,7 @@ trimPath(currDirName))) uniqueDirNr++; - /* we need maintain original src dir name instead of trimmed + /* we need to maintain original src dir name instead of trimmed * dir, so we can retrieve the original src dir's mode_t */ uniqueDirNames[uniqueDirNr - 1] = currDirName; } @@ -1378,6 +1616,9 @@ int UTIL_countCores(int logical) { + /* suppress unused parameter warning */ + (void)logical; + /* assume 1 */ return 1; } diff --git a/sys/contrib/zstd/programs/zstd.1 b/sys/contrib/zstd/programs/zstd.1 --- a/sys/contrib/zstd/programs/zstd.1 +++ b/sys/contrib/zstd/programs/zstd.1 @@ -1,501 +1,381 @@ -. -.TH "ZSTD" "1" "January 2022" "zstd 1.5.2" "User Commands" -. 
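For illustration, the fd-first lookup that the UTIL_fstat() and UTIL_setFDStat() additions above rely on can be sketched with plain POSIX calls. This is a minimal standalone sketch, not the zstd code itself; the statPreferFd() helper name is hypothetical:

    #include <fcntl.h>      /* open, O_RDONLY */
    #include <stdio.h>      /* printf */
    #include <sys/stat.h>   /* fstat, stat, struct stat */
    #include <unistd.h>     /* close */

    /* Hypothetical helper mirroring the UTIL_fstat() idea: use the already-open
     * descriptor when one is available, otherwise fall back to a path lookup.
     * Returns 1 on success, 0 on failure, like UTIL_stat(). */
    static int statPreferFd(int fd, const char* filename, struct stat* statbuf)
    {
        if (fd >= 0) return fstat(fd, statbuf) == 0;
        return stat(filename, statbuf) == 0;
    }

    int main(int argc, char* argv[])
    {
        const char* const filename = (argc > 1) ? argv[1] : "/etc/hosts";
        int const fd = open(filename, O_RDONLY);   /* may fail; -1 triggers the path fallback */
        struct stat statbuf;

        if (statPreferFd(fd, filename, &statbuf))
            printf("%s: %lld bytes\n", filename, (long long)statbuf.st_size);

        if (fd >= 0) close(fd);
        return 0;
    }

Reusing an already-open descriptor avoids a second path resolution, which is where the speedup noted in the util.h comment comes from.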
+.TH "ZSTD" "1" "March 2023" "zstd 1.5.5" "User Commands" .SH "NAME" \fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files -. .SH "SYNOPSIS" -\fBzstd\fR [\fIOPTIONS\fR] [\-|\fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] -. +.TS +allbox; +\fBzstd\fR [\fIOPTIONS\fR] [\- \fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR] +.TE .P \fBzstdmt\fR is equivalent to \fBzstd \-T0\fR -. .P \fBunzstd\fR is equivalent to \fBzstd \-d\fR -. .P \fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR -. .SH "DESCRIPTION" -\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, with fast modes at > 200 MB/s per core, and strong modes nearing lzma compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. -. +\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip\fR(1) and \fBxz\fR(1)\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, from fast modes at > 200 MB/s per core, to strong modes with excellent compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\. .P -\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences : -. -.IP "\(bu" 4 +\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences: +.IP "\[ci]" 4 Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\. -. -.IP "\(bu" 4 -\fBzstd\fR does not accept input from console, but it properly accepts \fBstdin\fR when it\'s not the console\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\. -. +.IP "\[ci]" 4 +\fBzstd\fR does not accept input from console, though it does accept \fBstdin\fR when it\'s not the console\. +.IP "\[ci]" 4 +\fBzstd\fR does not store the input\'s filename or attributes, only its contents\. .IP "" 0 -. .P -\fBzstd\fR compresses or decompresses each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. -. +\fBzstd\fR processes each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal: it will display an error message and skip the file\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\. 
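The terminal refusal described above comes down to an isatty() check on the standard streams. A minimal sketch of that check, independent of the CLI's actual UTIL_isConsole() plumbing:

    #include <stdio.h>    /* fwrite, fprintf, fileno */
    #include <unistd.h>   /* isatty */

    int main(void)
    {
        /* Refuse to emit binary data when stdout is an interactive terminal,
         * mirroring the behavior described above.  The real CLI routes this
         * decision through UTIL_isConsole() so tests can fake a console. */
        if (isatty(fileno(stdout))) {
            fprintf(stderr, "stdout is a terminal -- refusing to write compressed data\n");
            return 1;
        }
        /* Example payload: the 4-byte zstd frame magic number (little-endian 0xFD2FB528). */
        fwrite("\x28\xb5\x2f\xfd", 1, 4, stdout);
        return 0;
    }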
.P Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name: -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When compressing, the suffix \fB\.zst\fR is appended to the source filename to get the target filename\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 When decompressing, the \fB\.zst\fR suffix is removed from the source filename to get the target filename -. .IP "" 0 -. -.SS "Concatenation with \.zst files" -It is possible to concatenate \fB\.zst\fR files as is\. \fBzstd\fR will decompress such files as if they were a single \fB\.zst\fR file\. -. +.SS "Concatenation with \.zst Files" +It is possible to concatenate multiple \fB\.zst\fR files\. \fBzstd\fR will decompress such agglomerated file as if it was a single \fB\.zst\fR file\. .SH "OPTIONS" -. -.SS "Integer suffixes and special values" +.SS "Integer Suffixes and Special Values" In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\. -. .TP \fBKiB\fR -Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. -. +Multiply the integer by 1,024 (2\e^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\. .TP \fBMiB\fR -Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. -. -.SS "Operation mode" +Multiply the integer by 1,048,576 (2\e^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\. +.SS "Operation Mode" If multiple operation mode options are given, the last one takes effect\. -. .TP \fB\-z\fR, \fB\-\-compress\fR Compress\. This is the default operation mode when no operation mode option is specified and no other operation mode is implied from the command name (for example, \fBunzstd\fR implies \fB\-\-decompress\fR)\. -. .TP \fB\-d\fR, \fB\-\-decompress\fR, \fB\-\-uncompress\fR Decompress\. -. .TP \fB\-t\fR, \fB\-\-test\fR -Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout\fR except that the decompressed data is discarded instead of being written to standard output\. No files are created or removed\. -. +Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout > /dev/null\fR, decompressed data is discarded and checksummed for errors\. No files are created or removed\. .TP \fB\-b#\fR -Benchmark file(s) using compression level # -. +Benchmark file(s) using compression level \fI#\fR\. See \fIBENCHMARK\fR below for a description of this operation\. .TP -\fB\-\-train FILEs\fR -Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. -. +\fB\-\-train FILES\fR +Use \fIFILES\fR as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\. See \fIDICTIONARY BUILDER\fR below for a description of this operation\. .TP \fB\-l\fR, \fB\-\-list\fR -Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. This command can be augmented with the \fB\-v\fR modifier\. -. -.SS "Operation modifiers" -. -.IP "\(bu" 4 -\fB\-#\fR: \fB#\fR compression level [1\-19] (default: 3) -. -.IP "\(bu" 4 +Display information related to a zstd compressed file, such as size, ratio, and checksum\. Some of these fields may not be available\. 
This command\'s output can be augmented with the \fB\-v\fR modifier\. +.SS "Operation Modifiers" +.IP "\[ci]" 4 +\fB\-#\fR: selects \fB#\fR compression level [1\-19] (default: 3) +.IP "\[ci]" 4 \fB\-\-ultra\fR: unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-fast[=#]\fR: switch to ultra\-fast compression levels\. If \fB=#\fR is not present, it defaults to \fB1\fR\. The higher the value, the faster the compression speed, at the cost of some compression ratio\. This setting overwrites compression level if one was set previously\. Similarly, if a compression level is set after \fB\-\-fast\fR, it overrides it\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-T#\fR, \fB\-\-threads=#\fR: Compress using \fB#\fR working threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. In all cases, the nb of threads is capped to \fBZSTDMT_NBWORKERS_MAX\fR, which is either 64 in 32\-bit mode, or 256 for 64\-bit environments\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\. -. -.IP "\(bu" 4 -\fB\-\-single\-thread\fR: Does not spawn a thread for compression, use a single thread for both I/O and compression\. In this mode, compression is serialized with I/O, which is slightly slower\. (This is different from \fB\-T1\fR, which spawns 1 compression thread in parallel of I/O)\. This mode is the only one available when multithread support is disabled\. Single\-thread mode features lower memory usage\. Final compressed result is slightly different from \fB\-T1\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-single\-thread\fR: Use a single thread for both I/O and compression\. As compression is serialized with I/O, this can be slightly slower\. Single\-thread mode features significantly lower memory usage, which can be useful for systems with limited amount of memory, such as 32\-bit systems\. +.IP +Note 1: this mode is the only available one when multithread support is disabled\. +.IP +Note 2: this mode is different from \fB\-T1\fR, which spawns 1 compression thread in parallel with I/O\. Final compressed result is also slightly different from \fB\-T1\fR\. +.IP "\[ci]" 4 \fB\-\-auto\-threads={physical,logical} (default: physical)\fR: When using a default amount of threads via \fB\-T0\fR, choose the default based on the number of detected physical or logical cores\. -. -.IP "\(bu" 4 -\fB\-\-adapt[=min=#,max=#]\fR : \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. \fInote\fR : at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. -. -.IP "\(bu" 4 -\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if not \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. 
This setting is designed to improve the compression ratio for files with long matches at a large distance\. -. +.IP "\[ci]" 4 +\fB\-\-adapt[=min=#,max=#]\fR: \fBzstd\fR will dynamically adapt compression level to perceived I/O conditions\. Compression level adaptation can be observed live by using command \fB\-v\fR\. Adaptation can be constrained between supplied \fBmin\fR and \fBmax\fR levels\. The feature works when combined with multi\-threading and \fB\-\-long\fR mode\. It does not work with \fB\-\-single\-thread\fR\. It sets window size to 8 MiB by default (can be changed manually, see \fBwlog\fR)\. Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible\. +.IP +\fINote\fR: at the time of this writing, \fB\-\-adapt\fR can remain stuck at low speed when combined with multiple worker threads (>=2)\. +.IP "\[ci]" 4 +\fB\-\-long[=#]\fR: enables long distance matching with \fB#\fR \fBwindowLog\fR, if \fB#\fR is not present it defaults to \fB27\fR\. This increases the window size (\fBwindowLog\fR) and memory usage for both the compressor and decompressor\. This setting is designed to improve the compression ratio for files with long matches at a large distance\. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-D DICT\fR: use \fBDICT\fR as Dictionary to compress or decompress FILE(s) -. -.IP "\(bu" 4 -\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that windowSize > srcSize\. -. -.IP -Note: cannot use both this and \-D together Note: \fB\-\-long\fR mode will be automatically activated if chainLog < fileLog (fileLog being the windowLog required to cover the whole file)\. You can also manually force it\. Node: for all levels, you can use \-\-patch\-from in \-\-single\-thread mode to improve compression ratio at the cost of speed Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e 4096), and by setting a large \fB\-\-zstd=chainLog=\fR -. -.IP "\(bu" 4 -\fB\-\-rsyncable\fR : \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and the faster compression levels will see a small compression speed hit\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-patch\-from FILE\fR: Specify the file to be used as a reference point for zstd\'s diff engine\. This is effectively dictionary compression with some convenient parameter selection, namely that \fIwindowSize\fR > \fIsrcSize\fR\. +.IP +Note: cannot use both this and \fB\-D\fR together\. +.IP +Note: \fB\-\-long\fR mode will be automatically activated if \fIchainLog\fR < \fIfileLog\fR (\fIfileLog\fR being the \fIwindowLog\fR required to cover the whole file)\. You can also manually force it\. +.IP +Note: for all levels, you can use \fB\-\-patch\-from\fR in \fB\-\-single\-thread\fR mode to improve compression ratio at the cost of speed\. 
+.IP +Note: for level 19, you can get increased compression ratio at the cost of speed by specifying \fB\-\-zstd=targetLength=\fR to be something large (i\.e\. 4096), and by setting a large \fB\-\-zstd=chainLog=\fR\. +.IP "\[ci]" 4 +\fB\-\-rsyncable\fR: \fBzstd\fR will periodically synchronize the compression state to make the compressed file more rsync\-friendly\. There is a negligible impact to compression ratio, and a potential impact to compression speed, perceptible at higher speeds, for example when combining \fB\-\-rsyncable\fR with many parallel worker threads\. This feature does not work with \fB\-\-single\-thread\fR\. You probably don\'t want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary\. +.IP "\[ci]" 4 \fB\-C\fR, \fB\-\-[no\-]check\fR: add integrity check computed from uncompressed data (default: enabled) -. -.IP "\(bu" 4 -\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \-\-content\-size (meaning that the original size will be placed in the header)\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-[no\-]content\-size\fR: enable / disable whether or not the original size of the file is placed in the header of the compressed file\. The default option is \fB\-\-content\-size\fR (meaning that the original size will be placed in the header)\. +.IP "\[ci]" 4 \fB\-\-no\-dictID\fR: do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\. -. -.IP "\(bu" 4 -\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, Zstandard uses 128 MB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (ie\. you can increase or decrease it)\. -. -.IP -This is also used during compression when using with \-\-patch\-from=\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MB)\. -. -.IP -Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GB\. zstd will load training samples up to the memory limit and ignore the rest\. -. -.IP "\(bu" 4 -\fB\-\-stream\-size=#\fR : Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-M#\fR, \fB\-\-memory=#\fR: Set a memory usage limit\. By default, \fBzstd\fR uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can override this manually if need be in either direction (i\.e\. you can increase or decrease it)\. +.IP +This is also used during compression when using with \fB\-\-patch\-from=\fR\. In this case, this parameter overrides that maximum size allowed for a dictionary\. (128 MiB)\. +.IP +Additionally, this can be used to limit memory for dictionary training\. This parameter overrides the default limit of 2 GiB\. zstd will load training samples up to the memory limit and ignore the rest\. 
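When driving libzstd directly rather than the CLI, the closest analogue of this decompression memory limit is the ZSTD_d_windowLogMax advanced parameter. A minimal sketch, assuming the public zstd.h API, with 2^27 chosen to mirror the 128 MiB default mentioned above:

    #include <stdio.h>
    #include <zstd.h>   /* ZSTD_createDCtx, ZSTD_DCtx_setParameter, ZSTD_d_windowLogMax */

    int main(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        if (dctx == NULL) return 1;

        /* Roughly the library-side counterpart of `zstd -d -M128MB`:
         * reject frames whose window requirement exceeds 2^27 = 128 MiB. */
        {   size_t const ret = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
            if (ZSTD_isError(ret))
                fprintf(stderr, "setParameter failed: %s\n", ZSTD_getErrorName(ret));
        }

        /* ... feed data through ZSTD_decompressStream(dctx, ...) here ... */

        ZSTD_freeDCtx(dctx);
        return 0;
    }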
+.IP "\[ci]" 4 +\fB\-\-stream\-size=#\fR: Sets the pledged source size of input coming from a stream\. This value must be exact, as it will be included in the produced frame header\. Incorrect stream sizes will cause an error\. This information will be used to better optimize compression parameters, resulting in better and potentially faster compression, especially for smaller source sizes\. +.IP "\[ci]" 4 \fB\-\-size\-hint=#\fR: When handling input from a stream, \fBzstd\fR must guess how large the source size will be when optimizing compression parameters\. If the stream size is relatively small, this guess may be a poor one, resulting in a higher compression ratio than expected\. This feature allows for controlling the guess when needed\. Exact guesses result in better compression ratios\. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation\. -. -.IP "\(bu" 4 -\fB\-o FILE\fR: save result into \fBFILE\fR -. -.IP "\(bu" 4 -\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. -. -.IP "\(bu" 4 -\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console) -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-o FILE\fR: save result into \fBFILE\fR\. +.IP "\[ci]" 4 +\fB\-f\fR, \fB\-\-force\fR: disable input and output checks\. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc\. During decompression and when the output destination is stdout, pass\-through unrecognized formats as\-is\. +.IP "\[ci]" 4 +\fB\-c\fR, \fB\-\-stdout\fR: write to standard output (even if it is the console); keep original files unchanged\. +.IP "\[ci]" 4 \fB\-\-[no\-]sparse\fR: enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default: enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\. -. -.IP "\(bu" 4 -\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. If used in combination with \-o, will trigger a confirmation prompt (which can be silenced with \-f), as this is a destructive operation\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-[no\-]pass\-through\fR enable / disable passing through uncompressed files as\-is\. During decompression when pass\-through is enabled, unrecognized formats will be copied as\-is from the input to the output\. By default, pass\-through will occur when the output destination is stdout and the force (\fB\-f\fR) option is set\. +.IP "\[ci]" 4 +\fB\-\-rm\fR: remove source file(s) after successful compression or decompression\. This command is silently ignored if output is \fBstdout\fR\. If used in combination with \fB\-o\fR, triggers a confirmation prompt (which can be silenced with \fB\-f\fR), as this is a destructive operation\. +.IP "\[ci]" 4 \fB\-k\fR, \fB\-\-keep\fR: keep source file(s) after successful compression or decompression\. This is the default behavior\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-r\fR: operate recursively on directories\. It selects all files in the named directory and all its subdirectories\. 
This can be useful both to reduce command line typing, and to circumvent shell expansion limitations, when there are a lot of files and naming breaks the maximum size of a command line\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-filelist FILE\fR read a list of files to process as content from \fBFILE\fR\. Format is compatible with \fBls\fR output, with one file per line\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-flat DIR\fR: resulting files are stored into target \fBDIR\fR directory, instead of same directory as origin file\. Be aware that this command can introduce name collision issues, if multiple files, from different directories, end up having the same name\. Collision resolution ensures first file with a given name will be present in \fBDIR\fR, while in combination with \fB\-f\fR, the last file will be present instead\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-output\-dir\-mirror DIR\fR: similar to \fB\-\-output\-dir\-flat\fR, the output files are stored underneath target \fBDIR\fR directory, but this option will replicate input directory hierarchy into output \fBDIR\fR\. -. .IP If input directory contains "\.\.", the files in this directory will be ignored\. If input directory is an absolute directory (i\.e\. "/var/tmp/abc"), it will be stored into the "output\-dir/var/tmp/abc"\. If there are multiple input files or directories, name collision resolution will follow the same rules as \fB\-\-output\-dir\-flat\fR\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-format=FORMAT\fR: compress and decompress in other formats\. If compiled with support, zstd can compress to or decompress from other compression algorithm formats\. Possibly available options are \fBzstd\fR, \fBgzip\fR, \fBxz\fR, \fBlzma\fR, and \fBlz4\fR\. If no such format is provided, \fBzstd\fR is the default\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR: display help/long help and exit -. -.IP "\(bu" 4 -\fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced : \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-V\fR, \fB\-\-version\fR: display version number and exit\. Advanced: \fB\-vV\fR also displays supported formats\. \fB\-vvV\fR also displays POSIX support\. \fB\-q\fR will only display the version number, suitable for machine reading\. +.IP "\[ci]" 4 \fB\-v\fR, \fB\-\-verbose\fR: verbose mode, display more information -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-q\fR, \fB\-\-quiet\fR: suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 \fB\-\-no\-progress\fR: do not display the progress bar, but keep all other messages\. -. -.IP "\(bu" 4 -\fB\-\-show\-default\-cparams\fR: Shows the default compression parameters that will be used for a particular src file\. If the provided src file is not a regular file (eg\. named pipe), the cli will just output the default parameters\. That is, the parameters that are used when the src size is unknown\. -. -.IP "\(bu" 4 +.IP "\[ci]" 4 +\fB\-\-show\-default\-cparams\fR: shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size\. If the provided file is not a regular file (e\.g\. a pipe), this flag will output the parameters used for inputs of unknown size\. +.IP "\[ci]" 4 \fB\-\-\fR: All arguments after \fB\-\-\fR are treated as files -. 
.IP "" 0 -. -.SS "Restricted usage of Environment Variables" -Using environment variables to set parameters has security implications\. Therefore, this avenue is intentionally restricted\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. -. +.SS "gzip Operation Modifiers" +When invoked via a \fBgzip\fR symlink, \fBzstd\fR will support further options that intend to mimic the \fBgzip\fR behavior: +.TP +\fB\-n\fR, \fB\-\-no\-name\fR +do not store the original filename and timestamps when compressing a file\. This is the default behavior and hence a no\-op\. +.TP +\fB\-\-best\fR +alias to the option \fB\-9\fR\. +.SS "Environment Variables" +Employing environment variables to set parameters has security implications\. Therefore, this avenue is intentionally limited\. Only \fBZSTD_CLEVEL\fR and \fBZSTD_NBTHREADS\fR are currently supported\. They set the compression level and number of threads to use during compression, respectively\. .P \fBZSTD_CLEVEL\fR can be used to set the level between 1 and 19 (the "normal" range)\. If the value of \fBZSTD_CLEVEL\fR is not a valid integer, it will be ignored with a warning message\. \fBZSTD_CLEVEL\fR just replaces the default compression level (\fB3\fR)\. -. .P \fBZSTD_NBTHREADS\fR can be used to set the number of threads \fBzstd\fR will attempt to use during compression\. If the value of \fBZSTD_NBTHREADS\fR is not a valid unsigned integer, it will be ignored with a warning message\. \fBZSTD_NBTHREADS\fR has a default value of (\fB1\fR), and is capped at ZSTDMT_NBWORKERS_MAX==200\. \fBzstd\fR must be compiled with multithread support for this to have any effect\. -. .P They can both be overridden by corresponding command line arguments: \fB\-#\fR for compression level and \fB\-T#\fR for number of compression threads\. -. .SH "DICTIONARY BUILDER" -\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. -. +\fBzstd\fR offers \fIdictionary\fR compression, which greatly improves efficiency on small files and messages\. It\'s possible to train \fBzstd\fR with a set of samples, the result of which is saved into a file called a \fBdictionary\fR\. Then, during compression and decompression, reference the same dictionary, using command \fB\-D dictionaryFileName\fR\. Compression of small files similar to the sample set will be greatly improved\. .TP \fB\-\-train FILEs\fR -Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\. \fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. -. +Use FILEs as training set to create a dictionary\. The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size (for example, ~10 MB for a 100 KB dictionary)\. 
\fB\-\-train\fR can be combined with \fB\-r\fR to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits\. +.IP +Since dictionary compression is mostly effective for small files, the expectation is that the training set will only contain small files\. In the case where some samples happen to be large, only the first 128 KiB of these samples will be used for training\. .IP -\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. -. +\fB\-\-train\fR supports multithreading if \fBzstd\fR is compiled with threading support (default)\. Additional advanced parameters can be specified with \fB\-\-train\-fastcover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. The slower cover dictionary builder can be accessed with \fB\-\-train\-cover\fR\. Default \fB\-\-train\fR is equivalent to \fB\-\-train\-fastcover=d=8,steps=4\fR\. .TP -\fB\-o file\fR -Dictionary saved into \fBfile\fR (default name: dictionary)\. -. +\fB\-o FILE\fR +Dictionary saved into \fBFILE\fR (default name: dictionary)\. .TP \fB\-\-maxdict=#\fR -Limit dictionary to specified size (default: 112640)\. -. +Limit dictionary to specified size (default: 112640 bytes)\. As usual, quantities are expressed in bytes by default, and it\'s possible to employ suffixes (like \fBKB\fR or \fBMB\fR) to specify larger values\. .TP \fB\-#\fR Use \fB#\fR compression level during training (optional)\. Will generate statistics more tuned for selected compression level, resulting in a \fIsmall\fR compression ratio improvement for this level\. -. .TP \fB\-B#\fR Split input files into blocks of size # (default: no split) -. .TP \fB\-M#\fR, \fB\-\-memory=#\fR -Limit the amount of sample data loaded for training (default: 2 GB)\. See above for details\. -. +Limit the amount of sample data loaded for training (default: 2 GB)\. Note that the default (2 GB) is also the maximum\. This parameter can be useful in situations where the training set size is not well controlled and could be potentially very large\. Since speed of the training process is directly correlated to the size of the training sample set, a smaller sample set leads to faster training\. +.IP +In situations where the training set is larger than maximum memory, the CLI will randomly select samples among the available ones, up to the maximum allowed memory budget\. This is meant to improve dictionary relevance by mitigating the potential impact of clustering, such as selecting only files from the beginning of a list sorted by modification date, or sorted by alphabetical order\. The randomization process is deterministic, so training of the same list of files with the same parameters will lead to the creation of the same dictionary\. .TP \fB\-\-dictID=#\fR -A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. 
However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. -. +A dictionary ID is a locally unique ID\. The decoder will use this value to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to provide an explicit number ID instead\. It\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\. Note that short numbers have an advantage: an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. +.IP +Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\e^31, so they should not be used in public\. .TP \fB\-\-train\-cover[=k#,d=#,steps=#,split=#,shrink[=#]]\fR Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. If \fIsplit\fR is not specified or split <= 0, then the default value of 100 is used\. Requires that \fId\fR <= \fIk\fR\. If \fIshrink\fR flag is not used, then the default value for \fIshrinkDict\fR of 0 is used\. If \fIshrink\fR is not specified, then the default value for \fIshrinkDictMaxRegression\fR of 1 is used\. -. .IP Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. If \fIsplit\fR is 100, all input samples are used for both training and testing to find optimal \fId\fR and \fIk\fR to build dictionary\. Supports multithreading if \fBzstd\fR is compiled with threading support\. Having \fIshrink\fR enabled takes a truncated dictionary of minimum size and doubles in size until compression ratio of the truncated dictionary is at most \fIshrinkDictMaxRegression%\fR worse than the compression ratio of the largest dictionary\. -. .IP Examples: -. .IP \fBzstd \-\-train\-cover FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR -. .IP \fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50 FILEs\fR -. .IP \fBzstd \-\-train\-cover=k=50,split=60 FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink FILEs\fR -. .IP \fBzstd \-\-train\-cover=shrink=2 FILEs\fR -. .TP \fB\-\-train\-fastcover[=k#,d=#,f=#,steps=#,split=#,accel=#]\fR Same as cover but with extra parameters \fIf\fR and \fIaccel\fR and different default value of split If \fIsplit\fR is not specified, then it tries \fIsplit\fR = 75\. If \fIf\fR is not specified, then it tries \fIf\fR = 20\. Requires that 0 < \fIf\fR < 32\. If \fIaccel\fR is not specified, then it tries \fIaccel\fR = 1\. Requires that 0 < \fIaccel\fR <= 10\. Requires that \fId\fR = 6 or \fId\fR = 8\. -. .IP \fIf\fR is log of size of array that keeps track of frequency of subsegments of size \fId\fR\. The subsegment is hashed to an index in the range [0,2^\fIf\fR \- 1]\. It is possible that 2 different subsegments are hashed to the same index, and they are considered as the same subsegment when computing frequency\. 
Using a higher \fIf\fR reduces collision but takes longer\. -. .IP Examples: -. .IP \fBzstd \-\-train\-fastcover FILEs\fR -. .IP \fBzstd \-\-train\-fastcover=d=8,f=15,accel=2 FILEs\fR -. .TP \fB\-\-train\-legacy[=selectivity=#]\fR -Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its possible maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. -. +Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its achievable maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\. .IP Examples: -. .IP \fBzstd \-\-train\-legacy FILEs\fR -. .IP \fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR -. .SH "BENCHMARK" -. .TP \fB\-b#\fR benchmark file(s) using compression level # -. .TP \fB\-e#\fR benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive) -. .TP \fB\-i#\fR minimum evaluation time, in seconds (default: 3s), benchmark mode only -. .TP \fB\-B#\fR, \fB\-\-block\-size=#\fR -cut file(s) into independent blocks of size # (default: no block) -. +cut file(s) into independent chunks of size # (default: no chunking) .TP \fB\-\-priority=rt\fR set process priority to real\-time -. .P -\fBOutput Format:\fR CompressionLevel#Filename : IntputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed -. +\fBOutput Format:\fR CompressionLevel#Filename: InputSize \-> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed .P \fBMethodology:\fR For both compression and decompression speed, the entire input is compressed/decompressed in\-memory to measure speed\. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy\. -. .SH "ADVANCED COMPRESSION OPTIONS" -. -.SS "\-B#:" -Select the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to (slightly) different compressed frames\. -. +### \-B#: Specify the size of each compression job\. This parameter is only available when multi\-threading is enabled\. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads\. Default job size varies depending on compression level (generally \fB4 * windowSize\fR)\. \fB\-B#\fR makes it possible to manually select a custom size\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 512 KB, or \fBoverlapSize\fR, whichever is largest\. Different job sizes will lead to non\-identical compressed frames\. .SS "\-\-zstd[=options]:" -\fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. 
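As an illustration of the `-B#` job-size parameter described above, a hedged example (file name and sizes are hypothetical):

```sh
# Compress with 4 worker threads and an explicit 8 MiB job size;
# the job size is transparently raised if it falls below 512 KB or overlapSize.
zstd -T4 -B8MiB -15 big.tar -o big.tar.zst
```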
You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: -. +\fBzstd\fR provides 22 predefined regular compression levels plus the fast levels\. This compression level is translated internally into a number of specific parameters that actually control the behavior of the compressor\. (You can see the result of this translation with \fB\-\-show\-default\-cparams\fR\.) These specific parameters can be overridden with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR: .TP \fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR Specify a strategy used by a match finder\. -. .IP -There are 9 strategies numbered from 1 to 9, from faster to stronger: 1=ZSTD_fast, 2=ZSTD_dfast, 3=ZSTD_greedy, 4=ZSTD_lazy, 5=ZSTD_lazy2, 6=ZSTD_btlazy2, 7=ZSTD_btopt, 8=ZSTD_btultra, 9=ZSTD_btultra2\. -. +There are 9 strategies numbered from 1 to 9, from fastest to strongest: 1=\fBZSTD_fast\fR, 2=\fBZSTD_dfast\fR, 3=\fBZSTD_greedy\fR, 4=\fBZSTD_lazy\fR, 5=\fBZSTD_lazy2\fR, 6=\fBZSTD_btlazy2\fR, 7=\fBZSTD_btopt\fR, 8=\fBZSTD_btultra\fR, 9=\fBZSTD_btultra2\fR\. .TP \fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR Specify the maximum number of bits for a match distance\. -. .IP The higher number of increases the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 30 (1 GiB) on 32\-bit platforms and 31 (2 GiB) on 64\-bit platforms\. -. .IP Note: If \fBwindowLog\fR is set to larger than 27, \fB\-\-long=windowLog\fR or \fB\-\-memory=windowSize\fR needs to be passed to the decompressor\. -. .TP \fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR Specify the maximum number of bits for a hash table\. -. .IP -Bigger hash tables cause less collisions which usually makes compression faster, but requires more memory during compression\. -. +Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression\. .IP -The minimum \fIhlog\fR is 6 (64 B) and the maximum is 30 (1 GiB)\. -. +The minimum \fIhlog\fR is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB)\. .TP \fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR -Specify the maximum number of bits for a hash chain or a binary tree\. -. +Specify the maximum number of bits for the secondary search structure, whose form depends on the selected \fBstrategy\fR\. .IP -Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\. -. +Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the \fBZSTD_fast\fR \fBstrategy\fR, which only has the primary hash table\. .IP -The minimum \fIclog\fR is 6 (64 B) and the maximum is 29 (524 Mib) on 32\-bit platforms and 30 (1 Gib) on 64\-bit platforms\. -. 
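To make the relationship between a compression level and its underlying parameters more tangible, a small sketch (file name hypothetical):

```sh
# Show the parameters level 19 would select for this input,
# then override the window log and strategy while keeping the rest of level 19.
zstd --show-default-cparams -19 data.bin
zstd -19 --zstd=wlog=27,strat=8 data.bin -o data.bin.zst
```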
+The minimum \fIclog\fR is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32\-bit platforms and 30 (1B entries / 4 GiB) on 64\-bit platforms\. .TP \fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\. -. .IP More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed\. -. .IP The minimum \fIslog\fR is 1 and the maximum is \'windowLog\' \- 1\. -. .TP \fBminMatch\fR=\fImml\fR, \fBmml\fR=\fImml\fR Specify the minimum searched length of a match in a hash table\. -. .IP Larger search lengths usually decrease compression ratio but improve decompression speed\. -. .IP The minimum \fImml\fR is 3 and the maximum is 7\. -. .TP \fBtargetLength\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR The impact of this field vary depending on selected strategy\. -. .IP -For ZSTD_btopt, ZSTD_btultra and ZSTD_btultra2, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. t For ZSTD_fast, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed : a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. -. +For \fBZSTD_btopt\fR, \fBZSTD_btultra\fR and \fBZSTD_btultra2\fR, it specifies the minimum match length that causes match finder to stop searching\. A larger \fBtargetLength\fR usually improves compression ratio but decreases compression speed\. +.IP +For \fBZSTD_fast\fR, it triggers ultra\-fast mode when > 0\. The value represents the amount of data skipped between match sampling\. Impact is reversed: a larger \fBtargetLength\fR increases compression speed but decreases compression ratio\. .IP For all other strategies, this field has no impact\. -. .IP -The minimum \fItlen\fR is 0 and the maximum is 128 Kib\. -. +The minimum \fItlen\fR is 0 and the maximum is 128 KiB\. .TP \fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\. -. .IP -The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default" : \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. -. +The minimum \fIovlog\fR is 0, and the maximum is 9\. 1 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the reloaded amount by a factor 2\. For example, 8 means "windowSize/2", and 6 means "windowSize/8"\. Value 0 is special and means "default": \fIovlog\fR is automatically determined by \fBzstd\fR\. In which case, \fIovlog\fR will range from 6 to 9, depending on selected \fIstrat\fR\. .TP \fBldmHashLog\fR=\fIlhlog\fR, \fBlhlog\fR=\fIlhlog\fR Specify the maximum size for a hash table used for long distance matching\. -. 
.IP This option is ignored unless long distance matching is enabled\. -. .IP Bigger hash tables usually improve compression ratio at the expense of more memory during compression and a decrease in compression speed\. -. .IP The minimum \fIlhlog\fR is 6 and the maximum is 30 (default: 20)\. -. .TP \fBldmMinMatch\fR=\fIlmml\fR, \fBlmml\fR=\fIlmml\fR Specify the minimum searched length of a match for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger/very small values usually decrease compression ratio\. -. .IP The minimum \fIlmml\fR is 4 and the maximum is 4096 (default: 64)\. -. .TP \fBldmBucketSizeLog\fR=\fIlblog\fR, \fBlblog\fR=\fIlblog\fR Specify the size of each bucket for the hash table used for long distance matching\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger bucket sizes improve collision resolution but decrease compression speed\. -. .IP The minimum \fIlblog\fR is 1 and the maximum is 8 (default: 3)\. -. .TP \fBldmHashRateLog\fR=\fIlhrlog\fR, \fBlhrlog\fR=\fIlhrlog\fR Specify the frequency of inserting entries into the long distance matching hash table\. -. .IP This option is ignored unless long distance matching is enabled\. -. .IP Larger values will improve compression speed\. Deviating far from the default value will likely result in a decrease in compression ratio\. -. .IP The default value is \fBwlog \- lhlog\fR\. -. .SS "Example" The following parameters sets advanced compression options to something similar to predefined level 19 for files bigger than 256 KB: -. .P \fB\-\-zstd\fR=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 -. +.SH "SEE ALSO" +\fBzstdgrep\fR(1), \fBzstdless\fR(1), \fBgzip\fR(1), \fBxz\fR(1) +.P +The \fIzstandard\fR format is specified in Y\. Collet, "Zstandard Compression and the \'application/zstd\' Media Type", https://www\.ietf\.org/rfc/rfc8878\.txt, Internet RFC 8878 (February 2021)\. .SH "BUGS" Report bugs at: https://github\.com/facebook/zstd/issues -. .SH "AUTHOR" Yann Collet diff --git a/sys/contrib/zstd/programs/zstd.1.md b/sys/contrib/zstd/programs/zstd.1.md --- a/sys/contrib/zstd/programs/zstd.1.md +++ b/sys/contrib/zstd/programs/zstd.1.md @@ -4,7 +4,7 @@ SYNOPSIS -------- -`zstd` [*OPTIONS*] [-|_INPUT-FILE_] [-o _OUTPUT-FILE_] +`zstd` [] [-|] [-o ] `zstdmt` is equivalent to `zstd -T0` @@ -16,32 +16,32 @@ DESCRIPTION ----------- `zstd` is a fast lossless compression algorithm and data compression tool, -with command line syntax similar to `gzip (1)` and `xz (1)`. +with command line syntax similar to `gzip`(1) and `xz`(1). It is based on the **LZ77** family, with further FSE & huff0 entropy stages. `zstd` offers highly configurable compression speed, -with fast modes at > 200 MB/s per core, -and strong modes nearing lzma compression ratios. +from fast modes at > 200 MB/s per core, +to strong modes with excellent compression ratios. It also features a very fast decoder, with speeds > 500 MB/s per core. `zstd` command line syntax is generally similar to gzip, -but features the following differences : +but features the following differences: - Source files are preserved by default. It's possible to remove them automatically by using the `--rm` command. - When compressing a single file, `zstd` displays progress notifications and result summary by default. Use `-q` to turn them off. - - `zstd` does not accept input from console, - but it properly accepts `stdin` when it's not the console. 
- `zstd` displays a short help page when command line is an error. Use `-q` to turn it off. + - `zstd` does not accept input from console, + though it does accept `stdin` when it's not the console. + - `zstd` does not store the input's filename or attributes, only its contents. -`zstd` compresses or decompresses each _file_ according to the selected -operation mode. +`zstd` processes each _file_ according to the selected operation mode. If no _files_ are given or _file_ is `-`, `zstd` reads from standard input and writes the processed data to standard output. `zstd` will refuse to write compressed data to standard output -if it is a terminal : it will display an error message and skip the _file_. +if it is a terminal: it will display an error message and skip the file. Similarly, `zstd` will refuse to read compressed data from standard input if it is a terminal. @@ -53,14 +53,15 @@ * When decompressing, the `.zst` suffix is removed from the source filename to get the target filename -### Concatenation with .zst files -It is possible to concatenate `.zst` files as is. -`zstd` will decompress such files as if they were a single `.zst` file. +### Concatenation with .zst Files +It is possible to concatenate multiple `.zst` files. `zstd` will decompress +such an agglomerated file as if it were a single `.zst` file. OPTIONS ------- -### Integer suffixes and special values +### Integer Suffixes and Special Values + In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers. There must be no space between the integer and the suffix. @@ -72,7 +73,8 @@ Multiply the integer by 1,048,576 (2\^20). `Mi`, `M`, and `MB` are accepted as synonyms for `MiB`. -### Operation mode +### Operation Mode + If multiple operation mode options are given, the last one takes effect. @@ -85,23 +87,25 @@ Decompress. * `-t`, `--test`: Test the integrity of compressed _files_. - This option is equivalent to `--decompress --stdout` except that the - decompressed data is discarded instead of being written to standard output. + This option is equivalent to `--decompress --stdout > /dev/null`; + the decompressed data is discarded and checksummed for errors. No files are created or removed. * `-b#`: - Benchmark file(s) using compression level # -* `--train FILEs`: - Use FILEs as a training set to create a dictionary. + Benchmark file(s) using compression level _#_. + See _BENCHMARK_ below for a description of this operation. +* `--train FILES`: + Use _FILES_ as a training set to create a dictionary. The training set should contain a lot of small files (> 100). + See _DICTIONARY BUILDER_ below for a description of this operation. * `-l`, `--list`: Display information related to a zstd compressed file, such as size, ratio, and checksum. Some of these fields may not be available. - This command can be augmented with the `-v` modifier. + This command's output can be augmented with the `-v` modifier. -### Operation modifiers +### Operation Modifiers * `-#`: - `#` compression level \[1-19] (default: 3) + selects `#` compression level \[1-19\] (default: 3) * `--ultra`: unlocks high compression levels 20+ (maximum 22), using a lot more memory. Note that decompression will also require more memory when using these levels. @@ -119,27 +123,31 @@ which is either 64 in 32-bit mode, or 256 for 64-bit environments. This modifier does nothing if `zstd` is compiled without multithread support.
* `--single-thread`: - Does not spawn a thread for compression, use a single thread for both I/O and compression. - In this mode, compression is serialized with I/O, which is slightly slower. - (This is different from `-T1`, which spawns 1 compression thread in parallel of I/O). - This mode is the only one available when multithread support is disabled. - Single-thread mode features lower memory usage. - Final compressed result is slightly different from `-T1`. + Use a single thread for both I/O and compression. + As compression is serialized with I/O, this can be slightly slower. + Single-thread mode features significantly lower memory usage, + which can be useful for systems with limited amount of memory, such as 32-bit systems. + + Note 1: this mode is the only available one when multithread support is disabled. + + Note 2: this mode is different from `-T1`, which spawns 1 compression thread in parallel with I/O. + Final compressed result is also slightly different from `-T1`. * `--auto-threads={physical,logical} (default: physical)`: When using a default amount of threads via `-T0`, choose the default based on the number of detected physical or logical cores. -* `--adapt[=min=#,max=#]` : +* `--adapt[=min=#,max=#]`: `zstd` will dynamically adapt compression level to perceived I/O conditions. Compression level adaptation can be observed live by using command `-v`. Adaptation can be constrained between supplied `min` and `max` levels. The feature works when combined with multi-threading and `--long` mode. It does not work with `--single-thread`. - It sets window size to 8 MB by default (can be changed manually, see `wlog`). + It sets window size to 8 MiB by default (can be changed manually, see `wlog`). Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible. - _note_ : at the time of this writing, `--adapt` can remain stuck at low speed + + _Note_: at the time of this writing, `--adapt` can remain stuck at low speed when combined with multiple worker threads (>=2). * `--long[=#]`: - enables long distance matching with `#` `windowLog`, if not `#` is not + enables long distance matching with `#` `windowLog`, if `#` is not present it defaults to `27`. This increases the window size (`windowLog`) and memory usage for both the compressor and decompressor. @@ -153,22 +161,26 @@ * `--patch-from FILE`: Specify the file to be used as a reference point for zstd's diff engine. This is effectively dictionary compression with some convenient parameter - selection, namely that windowSize > srcSize. + selection, namely that _windowSize_ > _srcSize_. + + Note: cannot use both this and `-D` together. - Note: cannot use both this and -D together - Note: `--long` mode will be automatically activated if chainLog < fileLog - (fileLog being the windowLog required to cover the whole file). You + Note: `--long` mode will be automatically activated if _chainLog_ < _fileLog_ + (_fileLog_ being the _windowLog_ required to cover the whole file). You can also manually force it. - Node: for all levels, you can use --patch-from in --single-thread mode - to improve compression ratio at the cost of speed + + Note: for all levels, you can use `--patch-from` in `--single-thread` mode + to improve compression ratio at the cost of speed. + Note: for level 19, you can get increased compression ratio at the cost of speed by specifying `--zstd=targetLength=` to be something large - (i.e 4096), and by setting a large `--zstd=chainLog=` -* `--rsyncable` : + (i.e. 
4096), and by setting a large `--zstd=chainLog=`. +* `--rsyncable`: `zstd` will periodically synchronize the compression state to make the - compressed file more rsync-friendly. There is a negligible impact to - compression ratio, and the faster compression levels will see a small - compression speed hit. + compressed file more rsync-friendly. + There is a negligible impact to compression ratio, + and a potential impact to compression speed, perceptible at higher speeds, + for example when combining `--rsyncable` with many parallel worker threads. This feature does not work with `--single-thread`. You probably don't want to use it with long range mode, since it will decrease the effectiveness of the synchronization points, but your mileage may vary. @@ -177,24 +189,24 @@ * `--[no-]content-size`: enable / disable whether or not the original size of the file is placed in the header of the compressed file. The default option is - --content-size (meaning that the original size will be placed in the header). + `--content-size` (meaning that the original size will be placed in the header). * `--no-dictID`: do not store dictionary ID within frame header (dictionary compression). The decoder will have to rely on implicit knowledge about which dictionary to use, it won't be able to check if it's correct. * `-M#`, `--memory=#`: - Set a memory usage limit. By default, Zstandard uses 128 MB for decompression + Set a memory usage limit. By default, `zstd` uses 128 MiB for decompression as the maximum amount of memory the decompressor is allowed to use, but you can - override this manually if need be in either direction (ie. you can increase or + override this manually if need be in either direction (i.e. you can increase or decrease it). - This is also used during compression when using with --patch-from=. In this case, - this parameter overrides that maximum size allowed for a dictionary. (128 MB). + This is also used during compression when using with `--patch-from=`. In this case, + this parameter overrides that maximum size allowed for a dictionary. (128 MiB). Additionally, this can be used to limit memory for dictionary training. This parameter - overrides the default limit of 2 GB. zstd will load training samples up to the memory limit + overrides the default limit of 2 GiB. zstd will load training samples up to the memory limit and ignore the rest. -* `--stream-size=#` : +* `--stream-size=#`: Sets the pledged source size of input coming from a stream. This value must be exact, as it will be included in the produced frame header. Incorrect stream sizes will cause an error. This information will be used to better optimize compression parameters, resulting in @@ -207,12 +219,14 @@ Exact guesses result in better compression ratios. Overestimates result in slightly degraded compression ratios, while underestimates may result in significant degradation. * `-o FILE`: - save result into `FILE` + save result into `FILE`. * `-f`, `--force`: disable input and output checks. Allows overwriting existing files, input from console, output to stdout, operating on links, block devices, etc. + During decompression and when the output destination is stdout, pass-through + unrecognized formats as-is. * `-c`, `--stdout`: - write to standard output (even if it is the console) + write to standard output (even if it is the console); keep original files unchanged. * `--[no-]sparse`: enable / disable sparse FS support, to make files with many zeroes smaller on disk. 
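As a sketch of how `--patch-from` and `-M`/`--memory` interact (file names are hypothetical):

```sh
# Compress new.img as a delta against base.img, raising the 128 MiB reference limit,
# then reconstruct it; the same reference file must be supplied for decompression.
zstd --patch-from=base.img -M512MB new.img -o new.img.patch.zst
zstd -d --patch-from=base.img -M512MB new.img.patch.zst -o new.img.restored
```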
@@ -221,9 +235,17 @@ default: enabled when output is into a file, and disabled when output is stdout. This setting overrides default and can force sparse mode over stdout. +* `--[no-]pass-through` + enable / disable passing through uncompressed files as-is. During + decompression when pass-through is enabled, unrecognized formats will be + copied as-is from the input to the output. By default, pass-through will + occur when the output destination is stdout and the force (`-f`) option is + set. * `--rm`: - remove source file(s) after successful compression or decompression. If used in combination with - -o, will trigger a confirmation prompt (which can be silenced with -f), as this is a destructive operation. + remove source file(s) after successful compression or decompression. + This command is silently ignored if output is `stdout`. + If used in combination with `-o`, + triggers a confirmation prompt (which can be silenced with `-f`), as this is a destructive operation. * `-k`, `--keep`: keep source file(s) after successful compression or decompression. This is the default behavior. @@ -262,7 +284,7 @@ display help/long help and exit * `-V`, `--version`: display version number and exit. - Advanced : `-vV` also displays supported formats. + Advanced: `-vV` also displays supported formats. `-vvV` also displays POSIX support. `-q` will only display the version number, suitable for machine reading. * `-v`, `--verbose`: @@ -273,17 +295,27 @@ * `--no-progress`: do not display the progress bar, but keep all other messages. * `--show-default-cparams`: - Shows the default compression parameters that will be used for a - particular src file. If the provided src file is not a regular file - (eg. named pipe), the cli will just output the default parameters. - That is, the parameters that are used when the src size is unknown. + shows the default compression parameters that will be used for a particular input file, based on the provided compression level and the input size. + If the provided file is not a regular file (e.g. a pipe), this flag will output the parameters used for inputs of unknown size. * `--`: All arguments after `--` are treated as files -### Restricted usage of Environment Variables -Using environment variables to set parameters has security implications. -Therefore, this avenue is intentionally restricted. +### gzip Operation Modifiers +When invoked via a `gzip` symlink, `zstd` will support further +options that intend to mimic the `gzip` behavior: + +* `-n`, `--no-name`: + do not store the original filename and timestamps when compressing + a file. This is the default behavior and hence a no-op. +* `--best`: + alias to the option `-9`. + + +### Environment Variables + +Employing environment variables to set parameters has security implications. +Therefore, this avenue is intentionally limited. Only `ZSTD_CLEVEL` and `ZSTD_NBTHREADS` are currently supported. They set the compression level and number of threads to use during compression, respectively. @@ -293,8 +325,8 @@ `ZSTD_NBTHREADS` can be used to set the number of threads `zstd` will attempt to use during compression. If the value of `ZSTD_NBTHREADS` is not a valid unsigned integer, it will be ignored with a warning message. -`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200. `zstd` must be -compiled with multithread support for this to have any effect. +`ZSTD_NBTHREADS` has a default value of (`1`), and is capped at ZSTDMT_NBWORKERS_MAX==200. 
+`zstd` must be compiled with multithread support for this to have any effect. They can both be overridden by corresponding command line arguments: `-#` for compression level and `-T#` for number of compression threads. @@ -306,27 +338,36 @@ which greatly improves efficiency on small files and messages. It's possible to train `zstd` with a set of samples, the result of which is saved into a file called a `dictionary`. -Then during compression and decompression, reference the same dictionary, +Then, during compression and decompression, reference the same dictionary, using command `-D dictionaryFileName`. Compression of small files similar to the sample set will be greatly improved. * `--train FILEs`: Use FILEs as training set to create a dictionary. - The training set should contain a lot of small files (> 100), + The training set should ideally contain a lot of samples (> 100), and weight typically 100x the target dictionary size - (for example, 10 MB for a 100 KB dictionary). + (for example, ~10 MB for a 100 KB dictionary). `--train` can be combined with `-r` to indicate a directory rather than listing all the files, which can be useful to circumvent shell expansion limits. + Since dictionary compression is mostly effective for small files, + the expectation is that the training set will only contain small files. + In the case where some samples happen to be large, + only the first 128 KiB of these samples will be used for training. + `--train` supports multithreading if `zstd` is compiled with threading support (default). - Additional parameters can be specified with `--train-fastcover`. + Additional advanced parameters can be specified with `--train-fastcover`. The legacy dictionary builder can be accessed with `--train-legacy`. The slower cover dictionary builder can be accessed with `--train-cover`. - Default is equivalent to `--train-fastcover=d=8,steps=4`. -* `-o file`: - Dictionary saved into `file` (default name: dictionary). + Default `--train` is equivalent to `--train-fastcover=d=8,steps=4`. + +* `-o FILE`: + Dictionary saved into `FILE` (default name: dictionary). * `--maxdict=#`: - Limit dictionary to specified size (default: 112640). + Limit dictionary to specified size (default: 112640 bytes). + As usual, quantities are expressed in bytes by default, + and it's possible to employ suffixes (like `KB` or `MB`) + to specify larger values. * `-#`: Use `#` compression level during training (optional). Will generate statistics more tuned for selected compression level, @@ -334,17 +375,39 @@ * `-B#`: Split input files into blocks of size # (default: no split) * `-M#`, `--memory=#`: - Limit the amount of sample data loaded for training (default: 2 GB). See above for details. + Limit the amount of sample data loaded for training (default: 2 GB). + Note that the default (2 GB) is also the maximum. + This parameter can be useful in situations where the training set size + is not well controlled and could be potentially very large. + Since speed of the training process is directly correlated to + the size of the training sample set, + a smaller sample set leads to faster training. + + In situations where the training set is larger than maximum memory, + the CLI will randomly select samples among the available ones, + up to the maximum allowed memory budget. + This is meant to improve dictionary relevance + by mitigating the potential impact of clustering, + such as selecting only files from the beginning of a list + sorted by modification date, or sorted by alphabetical order. 
+ The randomization process is deterministic, so + training of the same list of files with the same parameters + will lead to the creation of the same dictionary. + * `--dictID=#`: - A dictionary ID is a locally unique ID - that a decoder can use to verify it is using the right dictionary. + A dictionary ID is a locally unique ID. + The decoder will use this value to verify it is using the right dictionary. By default, zstd will create a 4-bytes random number ID. - It's possible to give a precise number instead. - Short numbers have an advantage : an ID < 256 will only need 1 byte in the - compressed frame header, and an ID < 65536 will only need 2 bytes. - This compares favorably to 4 bytes default. - However, it's up to the dictionary manager to not assign twice the same ID to + It's possible to provide an explicit number ID instead. + It's up to the dictionary manager to not assign twice the same ID to 2 different dictionaries. + Note that short numbers have an advantage: + an ID < 256 will only need 1 byte in the compressed frame header, + and an ID < 65536 will only need 2 bytes. + This compares favorably to 4 bytes default. + + Note that RFC8878 reserves IDs less than 32768 and greater than or equal to 2\^31, so they should not be used in public. + * `--train-cover[=k#,d=#,steps=#,split=#,shrink[=#]]`: Select parameters for the default dictionary builder algorithm named cover. If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8. @@ -409,7 +472,7 @@ Use legacy dictionary builder algorithm with the given dictionary _selectivity_ (default: 9). The smaller the _selectivity_ value, the denser the dictionary, - improving its efficiency but reducing its possible maximum size. + improving its efficiency but reducing its achievable maximum size. `--train-legacy=s=#` is also accepted. Examples: @@ -429,30 +492,31 @@ * `-i#`: minimum evaluation time, in seconds (default: 3s), benchmark mode only * `-B#`, `--block-size=#`: - cut file(s) into independent blocks of size # (default: no block) + cut file(s) into independent chunks of size # (default: no chunking) * `--priority=rt`: set process priority to real-time -**Output Format:** CompressionLevel#Filename : IntputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed +**Output Format:** CompressionLevel#Filename: InputSize -> OutputSize (CompressionRatio), CompressionSpeed, DecompressionSpeed **Methodology:** For both compression and decompression speed, the entire input is compressed/decompressed in-memory to measure speed. A run lasts at least 1 sec, so when files are small, they are compressed/decompressed several times per run, in order to improve measurement accuracy. ADVANCED COMPRESSION OPTIONS ---------------------------- ### -B#: -Select the size of each compression job. +Specify the size of each compression job. This parameter is only available when multi-threading is enabled. Each compression job is run in parallel, so this value indirectly impacts the nb of active threads. Default job size varies depending on compression level (generally `4 * windowSize`). `-B#` makes it possible to manually select a custom size. Note that job size must respect a minimum value which is enforced transparently. This minimum is either 512 KB, or `overlapSize`, whichever is largest. -Different job sizes will lead to (slightly) different compressed frames. +Different job sizes will lead to non-identical compressed frames. ### --zstd[=options]: -`zstd` provides 22 predefined compression levels. 
-The selected or default predefined compression level can be changed with -advanced compression options. +`zstd` provides 22 predefined regular compression levels plus the fast levels. +This compression level is translated internally into a number of specific parameters that actually control the behavior of the compressor. +(You can see the result of this translation with `--show-default-cparams`.) +These specific parameters can be overridden with advanced compression options. The _options_ are provided as a comma-separated list. You may specify only the options you want to change and the rest will be taken from the selected or default compression level. @@ -461,10 +525,10 @@ - `strategy`=_strat_, `strat`=_strat_: Specify a strategy used by a match finder. - There are 9 strategies numbered from 1 to 9, from faster to stronger: - 1=ZSTD\_fast, 2=ZSTD\_dfast, 3=ZSTD\_greedy, - 4=ZSTD\_lazy, 5=ZSTD\_lazy2, 6=ZSTD\_btlazy2, - 7=ZSTD\_btopt, 8=ZSTD\_btultra, 9=ZSTD\_btultra2. + There are 9 strategies numbered from 1 to 9, from fastest to strongest: + 1=`ZSTD_fast`, 2=`ZSTD_dfast`, 3=`ZSTD_greedy`, + 4=`ZSTD_lazy`, 5=`ZSTD_lazy2`, 6=`ZSTD_btlazy2`, + 7=`ZSTD_btopt`, 8=`ZSTD_btultra`, 9=`ZSTD_btultra2`. - `windowLog`=_wlog_, `wlog`=_wlog_: Specify the maximum number of bits for a match distance. @@ -481,22 +545,23 @@ - `hashLog`=_hlog_, `hlog`=_hlog_: Specify the maximum number of bits for a hash table. - Bigger hash tables cause less collisions which usually makes compression + Bigger hash tables cause fewer collisions which usually makes compression faster, but requires more memory during compression. - The minimum _hlog_ is 6 (64 B) and the maximum is 30 (1 GiB). + The minimum _hlog_ is 6 (64 entries / 256 B) and the maximum is 30 (1B entries / 4 GiB). - `chainLog`=_clog_, `clog`=_clog_: - Specify the maximum number of bits for a hash chain or a binary tree. + Specify the maximum number of bits for the secondary search structure, + whose form depends on the selected `strategy`. Higher numbers of bits increases the chance to find a match which usually improves compression ratio. It also slows down compression speed and increases memory requirements for compression. - This option is ignored for the ZSTD_fast strategy. + This option is ignored for the `ZSTD_fast` `strategy`, which only has the primary hash table. - The minimum _clog_ is 6 (64 B) and the maximum is 29 (524 Mib) on 32-bit platforms - and 30 (1 Gib) on 64-bit platforms. + The minimum _clog_ is 6 (64 entries / 256 B) and the maximum is 29 (512M entries / 2 GiB) on 32-bit platforms + and 30 (1B entries / 4 GiB) on 64-bit platforms. - `searchLog`=_slog_, `slog`=_slog_: Specify the maximum number of searches in a hash chain or a binary tree @@ -518,19 +583,19 @@ - `targetLength`=_tlen_, `tlen`=_tlen_: The impact of this field vary depending on selected strategy. - For ZSTD\_btopt, ZSTD\_btultra and ZSTD\_btultra2, it specifies + For `ZSTD_btopt`, `ZSTD_btultra` and `ZSTD_btultra2`, it specifies the minimum match length that causes match finder to stop searching. A larger `targetLength` usually improves compression ratio but decreases compression speed. -t - For ZSTD\_fast, it triggers ultra-fast mode when > 0. + + For `ZSTD_fast`, it triggers ultra-fast mode when > 0. The value represents the amount of data skipped between match sampling. - Impact is reversed : a larger `targetLength` increases compression speed + Impact is reversed: a larger `targetLength` increases compression speed but decreases compression ratio. 
For all other strategies, this field has no impact. - The minimum _tlen_ is 0 and the maximum is 128 Kib. + The minimum _tlen_ is 0 and the maximum is 128 KiB. - `overlapLog`=_ovlog_, `ovlog`=_ovlog_: Determine `overlapSize`, amount of data reloaded from previous job. @@ -542,7 +607,7 @@ 9 means "full overlap", meaning up to `windowSize` is reloaded from previous job. Reducing _ovlog_ by 1 reduces the reloaded amount by a factor 2. For example, 8 means "windowSize/2", and 6 means "windowSize/8". - Value 0 is special and means "default" : _ovlog_ is automatically determined by `zstd`. + Value 0 is special and means "default": _ovlog_ is automatically determined by `zstd`. In which case, _ovlog_ will range from 6 to 9, depending on selected _strat_. - `ldmHashLog`=_lhlog_, `lhlog`=_lhlog_: @@ -592,6 +657,11 @@ `--zstd`=wlog=23,clog=23,hlog=22,slog=6,mml=3,tlen=48,strat=6 +SEE ALSO +-------- +`zstdgrep`(1), `zstdless`(1), `gzip`(1), `xz`(1) + +The format is specified in Y. Collet, "Zstandard Compression and the 'application/zstd' Media Type", https://www.ietf.org/rfc/rfc8878.txt, Internet RFC 8878 (February 2021). BUGS ---- diff --git a/sys/contrib/zstd/programs/zstdcli.c b/sys/contrib/zstd/programs/zstdcli.c --- a/sys/contrib/zstd/programs/zstdcli.c +++ b/sys/contrib/zstd/programs/zstdcli.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -27,8 +27,8 @@ /*-************************************ * Dependencies **************************************/ -#include "platform.h" /* IS_CONSOLE, PLATFORM_POSIX_VERSION */ -#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList */ +#include "platform.h" /* PLATFORM_POSIX_VERSION */ +#include "util.h" /* UTIL_HAS_CREATEFILELIST, UTIL_createFileList, UTIL_isConsole */ #include /* getenv */ #include /* strcmp, strlen */ #include /* fprintf(), stdin, stdout, stderr */ @@ -37,7 +37,7 @@ #include "fileio.h" /* stdinmark, stdoutmark, ZSTD_EXTENSION */ #ifndef ZSTD_NOBENCH -# include "benchzstd.h" /* BMK_benchFiles */ +# include "benchzstd.h" /* BMK_benchFilesAdvanced */ #endif #ifndef ZSTD_NODICT # include "dibio.h" /* ZDICT_cover_params_t, DiB_trainFromFiles() */ @@ -46,17 +46,18 @@ # include "zstdcli_trace.h" #endif #include "../lib/zstd.h" /* ZSTD_VERSION_STRING, ZSTD_minCLevel, ZSTD_maxCLevel */ +#include "fileio_asyncio.h" /*-************************************ * Constants **************************************/ -#define COMPRESSOR_NAME "zstd command line interface" +#define COMPRESSOR_NAME "Zstandard CLI" #ifndef ZSTD_VERSION # define ZSTD_VERSION "v" ZSTD_VERSION_STRING #endif #define AUTHOR "Yann Collet" -#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR +#define WELCOME_MESSAGE "*** %s (%i-bit) %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR #define ZSTD_ZSTDMT "zstdmt" #define ZSTD_UNZSTD "unzstd" @@ -124,6 +125,15 @@ } +/*! 
exeNameMatch() : + @return : a non-zero value if exeName matches test, excluding the extension + */ +static int exeNameMatch(const char* exeName, const char* test) +{ + return !strncmp(exeName, test, strlen(test)) && + (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); +} + /*-************************************ * Command Line **************************************/ @@ -133,143 +143,175 @@ */ static void usage(FILE* f, const char* programName) { - DISPLAY_F(f, "Usage : \n"); - DISPLAY_F(f, " %s [args] [FILE(s)] [-o file] \n", programName); + DISPLAY_F(f, "Compress or decompress the INPUT file(s); reads from STDIN if INPUT is `-` or not provided.\n\n"); + DISPLAY_F(f, "Usage: %s [OPTIONS...] [INPUT... | -] [-o OUTPUT]\n\n", programName); + DISPLAY_F(f, "Options:\n"); + DISPLAY_F(f, " -o OUTPUT Write output to a single file, OUTPUT.\n"); + DISPLAY_F(f, " -k, --keep Preserve INPUT file(s). [Default] \n"); + DISPLAY_F(f, " --rm Remove INPUT file(s) after successful (de)compression.\n"); +#ifdef ZSTD_GZCOMPRESS + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + DISPLAY_F(f, " -n, --no-name Do not store original filename when compressing.\n\n"); + } +#endif DISPLAY_F(f, "\n"); - DISPLAY_F(f, "FILE : a filename \n"); - DISPLAY_F(f, " with no FILE, or when FILE is - , read standard input\n"); - DISPLAY_F(f, "Arguments : \n"); #ifndef ZSTD_NOCOMPRESS - DISPLAY_F(f, " -# : # compression level (1-%d, default: %d) \n", ZSTDCLI_CLEVEL_MAX, ZSTDCLI_CLEVEL_DEFAULT); + DISPLAY_F(f, " -# Desired compression level, where `#` is a number between 1 and %d;\n", ZSTDCLI_CLEVEL_MAX); + DISPLAY_F(f, " lower numbers provide faster compression, higher numbers yield\n"); + DISPLAY_F(f, " better compression ratios. [Default: %d]\n\n", ZSTDCLI_CLEVEL_DEFAULT); #endif #ifndef ZSTD_NODECOMPRESS - DISPLAY_F(f, " -d : decompression \n"); + DISPLAY_F(f, " -d, --decompress Perform decompression.\n"); #endif - DISPLAY_F(f, " -D DICT: use DICT as Dictionary for compression or decompression \n"); - DISPLAY_F(f, " -o file: result stored into `file` (only 1 output file) \n"); - DISPLAY_F(f, " -f : disable input and output checks. Allows overwriting existing files,\n"); - DISPLAY_F(f, " input from console, output to stdout, operating on links,\n"); - DISPLAY_F(f, " block devices, etc.\n"); - DISPLAY_F(f, "--rm : remove source file(s) after successful de/compression \n"); - DISPLAY_F(f, " -k : preserve source file(s) (default) \n"); - DISPLAY_F(f, " -h/-H : display help/long help and exit \n"); + DISPLAY_F(f, " -D DICT Use DICT as the dictionary for compression or decompression.\n\n"); + DISPLAY_F(f, " -f, --force Disable input and output checks. Allows overwriting existing files,\n"); + DISPLAY_F(f, " receiving input from the console, printing output to STDOUT, and\n"); + DISPLAY_F(f, " operating on links, block devices, etc. 
Unrecognized formats will be\n"); + DISPLAY_F(f, " passed through as-is.\n\n"); + + DISPLAY_F(f, " -h Display short usage and exit.\n"); + DISPLAY_F(f, " -H, --help Display full help and exit.\n"); + DISPLAY_F(f, " -V, --version Display the program version and exit.\n"); + DISPLAY_F(f, "\n"); } static void usage_advanced(const char* programName) { DISPLAYOUT(WELCOME_MESSAGE); + DISPLAYOUT("\n"); usage(stdout, programName); - DISPLAYOUT( "\n"); - DISPLAYOUT( "Advanced arguments : \n"); - DISPLAYOUT( " -V : display Version number and exit \n"); - - DISPLAYOUT( " -c : write to standard output (even if it is the console) \n"); + DISPLAYOUT("Advanced options:\n"); + DISPLAYOUT(" -c, --stdout Write to STDOUT (even if it is a console) and keep the INPUT file(s).\n\n"); - DISPLAYOUT( " -v : verbose mode; specify multiple times to increase verbosity \n"); - DISPLAYOUT( " -q : suppress warnings; specify twice to suppress errors too \n"); - DISPLAYOUT( "--[no-]progress : forcibly display, or never display the progress counter.\n"); - DISPLAYOUT( " note: any (de)compressed output to terminal will mix with progress counter text. \n"); + DISPLAYOUT(" -v, --verbose Enable verbose output; pass multiple times to increase verbosity.\n"); + DISPLAYOUT(" -q, --quiet Suppress warnings; pass twice to suppress errors.\n"); +#ifndef ZSTD_NOTRACE + DISPLAYOUT(" --trace LOG Log tracing information to LOG.\n"); +#endif + DISPLAYOUT("\n"); + DISPLAYOUT(" --[no-]progress Forcibly show/hide the progress counter. NOTE: Any (de)compressed\n"); + DISPLAYOUT(" output to terminal will mix with progress counter text.\n\n"); #ifdef UTIL_HAS_CREATEFILELIST - DISPLAYOUT( " -r : operate recursively on directories \n"); - DISPLAYOUT( "--filelist FILE : read list of files to operate upon from FILE \n"); - DISPLAYOUT( "--output-dir-flat DIR : processed files are stored into DIR \n"); + DISPLAYOUT(" -r Operate recursively on directories.\n"); + DISPLAYOUT(" --filelist LIST Read a list of files to operate on from LIST.\n"); + DISPLAYOUT(" --output-dir-flat DIR Store processed files in DIR.\n"); #endif #ifdef UTIL_HAS_MIRRORFILELIST - DISPLAYOUT( "--output-dir-mirror DIR : processed files are stored into DIR respecting original directory structure \n"); + DISPLAYOUT(" --output-dir-mirror DIR Store processed files in DIR, respecting original directory structure.\n"); #endif + if (AIO_supported()) + DISPLAYOUT(" --[no-]asyncio Use asynchronous IO. [Default: Enabled]\n"); - + DISPLAYOUT("\n"); #ifndef ZSTD_NOCOMPRESS - DISPLAYOUT( "--[no-]check : during compression, add XXH64 integrity checksum to frame (default: enabled)"); + DISPLAYOUT(" --[no-]check Add XXH64 integrity checksums during compression. [Default: Add, Validate]\n"); #ifndef ZSTD_NODECOMPRESS - DISPLAYOUT( ". If specified with -d, decompressor will ignore/validate checksums in compressed frame (default: validate)."); + DISPLAYOUT(" If `-d` is present, ignore/validate checksums during decompression.\n"); #endif #else #ifdef ZSTD_NOCOMPRESS - DISPLAYOUT( "--[no-]check : during decompression, ignore/validate checksums in compressed frame (default: validate)."); + DISPLAYOUT(" --[no-]check Ignore/validate checksums during decompression.
[Default: Validate]"); #endif #endif /* ZSTD_NOCOMPRESS */ -#ifndef ZSTD_NOTRACE - DISPLAYOUT( "\n"); - DISPLAYOUT( "--trace FILE : log tracing information to FILE."); -#endif - DISPLAYOUT( "\n"); - - DISPLAYOUT( "-- : All arguments after \"--\" are treated as files \n"); + DISPLAYOUT("\n"); + DISPLAYOUT(" -- Treat remaining arguments after `--` as files.\n"); #ifndef ZSTD_NOCOMPRESS - DISPLAYOUT( "\n"); - DISPLAYOUT( "Advanced compression arguments : \n"); - DISPLAYOUT( "--ultra : enable levels beyond %i, up to %i (requires more memory) \n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); - DISPLAYOUT( "--long[=#]: enable long distance matching with given window log (default: %u) \n", g_defaultMaxWindowLog); - DISPLAYOUT( "--fast[=#]: switch to very fast compression levels (default: %u) \n", 1); - DISPLAYOUT( "--adapt : dynamically adapt compression level to I/O conditions \n"); - DISPLAYOUT( "--[no-]row-match-finder : force enable/disable usage of fast row-based matchfinder for greedy, lazy, and lazy2 strategies \n"); - DISPLAYOUT( "--patch-from=FILE : specify the file to be used as a reference point for zstd's diff engine. \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Advanced compression options:\n"); + DISPLAYOUT(" --ultra Enable levels beyond %i, up to %i; requires more memory.\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel()); + DISPLAYOUT(" --fast[=#] Use very fast compression levels. [Default: %u]\n", 1); +#ifdef ZSTD_GZCOMPRESS + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + DISPLAYOUT(" --best Compatibility alias for `-9`.\n"); + } +#endif + DISPLAYOUT(" --adapt Dynamically adapt compression level to I/O conditions.\n"); + DISPLAYOUT(" --long[=#] Enable long distance matching with window log #. [Default: %u]\n", g_defaultMaxWindowLog); + DISPLAYOUT(" --patch-from=REF Use REF as the reference point for Zstandard's diff engine. \n\n"); # ifdef ZSTD_MULTITHREAD - DISPLAYOUT( " -T# : spawns # compression threads (default: 1, 0==# cores) \n"); - DISPLAYOUT( " -B# : select size of each job (default: 0==automatic) \n"); - DISPLAYOUT( "--single-thread : use a single thread for both I/O and compression (result slightly different than -T1) \n"); - DISPLAYOUT( "--auto-threads={physical,logical} (default: physical} : use either physical cores or logical cores as default when specifying -T0 \n"); - DISPLAYOUT( "--rsyncable : compress using a rsync-friendly method (-B sets block size) \n"); + DISPLAYOUT(" -T# Spawn # compression threads. [Default: 1; pass 0 for core count.]\n"); + DISPLAYOUT(" --single-thread Share a single thread for I/O and compression (slightly different than `-T1`).\n"); + DISPLAYOUT(" --auto-threads={physical|logical}\n"); + DISPLAYOUT(" Use physical/logical cores when using `-T0`. [Default: Physical]\n\n"); + DISPLAYOUT(" -B# Set job size to #. [Default: 0 (automatic)]\n"); + DISPLAYOUT(" --rsyncable Compress using a rsync-friendly method (`-B` sets block size).
\n"); + DISPLAYOUT("\n"); # endif - DISPLAYOUT( "--exclude-compressed: only compress files that are not already compressed \n"); - DISPLAYOUT( "--stream-size=# : specify size of streaming input from `stdin` \n"); - DISPLAYOUT( "--size-hint=# optimize compression parameters for streaming input of approximately this size \n"); - DISPLAYOUT( "--target-compressed-block-size=# : generate compressed block of approximately targeted size \n"); - DISPLAYOUT( "--no-dictID : don't write dictID into header (dictionary compression only) \n"); - DISPLAYOUT( "--[no-]compress-literals : force (un)compressed literals \n"); - - DISPLAYOUT( "--format=zstd : compress files to the .zst format (default) \n"); + DISPLAYOUT(" --exclude-compressed Only compress files that are not already compressed.\n\n"); + + DISPLAYOUT(" --stream-size=# Specify size of streaming input from STDIN.\n"); + DISPLAYOUT(" --size-hint=# Optimize compression parameters for streaming input of approximately size #.\n"); + DISPLAYOUT(" --target-compressed-block-size=#\n"); + DISPLAYOUT(" Generate compressed blocks of approximately # size.\n\n"); + DISPLAYOUT(" --no-dictID Don't write `dictID` into the header (dictionary compression only).\n"); + DISPLAYOUT(" --[no-]compress-literals Force (un)compressed literals.\n"); + DISPLAYOUT(" --[no-]row-match-finder Explicitly enable/disable the fast, row-based matchfinder for\n"); + DISPLAYOUT(" the 'greedy', 'lazy', and 'lazy2' strategies.\n"); + + DISPLAYOUT("\n"); + DISPLAYOUT(" --format=zstd Compress files to the `.zst` format. [Default]\n"); + DISPLAYOUT(" --mmap-dict Memory-map dictionary file rather than mallocing and loading all at once"); #ifdef ZSTD_GZCOMPRESS - DISPLAYOUT( "--format=gzip : compress files to the .gz format \n"); + DISPLAYOUT(" --format=gzip Compress files to the `.gz` format.\n"); #endif #ifdef ZSTD_LZMACOMPRESS - DISPLAYOUT( "--format=xz : compress files to the .xz format \n"); - DISPLAYOUT( "--format=lzma : compress files to the .lzma format \n"); + DISPLAYOUT(" --format=xz Compress files to the `.xz` format.\n"); + DISPLAYOUT(" --format=lzma Compress files to the `.lzma` format.\n"); #endif #ifdef ZSTD_LZ4COMPRESS - DISPLAYOUT( "--format=lz4 : compress files to the .lz4 format \n"); + DISPLAYOUT( " --format=lz4 Compress files to the `.lz4` format.\n"); #endif #endif /* !ZSTD_NOCOMPRESS */ #ifndef ZSTD_NODECOMPRESS - DISPLAYOUT( "\n"); - DISPLAYOUT( "Advanced decompression arguments : \n"); - DISPLAYOUT( " -l : print information about zstd compressed files \n"); - DISPLAYOUT( "--test : test compressed file integrity \n"); - DISPLAYOUT( " -M# : Set a memory usage limit for decompression \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Advanced decompression options:\n"); + DISPLAYOUT(" -l Print information about Zstandard-compressed files.\n"); + DISPLAYOUT(" --test Test compressed file integrity.\n"); + DISPLAYOUT(" -M# Set the memory usage limit to # megabytes.\n"); # if ZSTD_SPARSE_DEFAULT - DISPLAYOUT( "--[no-]sparse : sparse mode (default: enabled on file, disabled on stdout) \n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. [Default: Enabled for files, disabled for STDOUT.]\n"); # else - DISPLAYOUT( "--[no-]sparse : sparse mode (default: disabled) \n"); + DISPLAYOUT(" --[no-]sparse Enable sparse mode. 
[Default: Disabled]\n"); # endif + { + char const* passThroughDefault = "Disabled"; + if (exeNameMatch(programName, ZSTD_CAT) || + exeNameMatch(programName, ZSTD_ZCAT) || + exeNameMatch(programName, ZSTD_GZCAT)) { + passThroughDefault = "Enabled"; + } + DISPLAYOUT(" --[no-]pass-through Pass through uncompressed files as-is. [Default: %s]\n", passThroughDefault); + } #endif /* ZSTD_NODECOMPRESS */ #ifndef ZSTD_NODICT - DISPLAYOUT( "\n"); - DISPLAYOUT( "Dictionary builder : \n"); - DISPLAYOUT( "--train ## : create a dictionary from a training set of files \n"); - DISPLAYOUT( "--train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]] : use the cover algorithm with optional args \n"); - DISPLAYOUT( "--train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]] : use the fast cover algorithm with optional args \n"); - DISPLAYOUT( "--train-legacy[=s=#] : use the legacy algorithm with selectivity (default: %u) \n", g_defaultSelectivityLevel); - DISPLAYOUT( " -o DICT : DICT is dictionary name (default: %s) \n", g_defaultDictName); - DISPLAYOUT( "--maxdict=# : limit dictionary to specified size (default: %u) \n", g_defaultMaxDictSize); - DISPLAYOUT( "--dictID=# : force dictionary ID to specified value (default: random) \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Dictionary builder:\n"); + DISPLAYOUT(" --train Create a dictionary from a training set of files.\n\n"); + DISPLAYOUT(" --train-cover[=k=#,d=#,steps=#,split=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the cover algorithm (with optional arguments).\n"); + DISPLAYOUT(" --train-fastcover[=k=#,d=#,f=#,steps=#,split=#,accel=#,shrink[=#]]\n"); + DISPLAYOUT(" Use the fast cover algorithm (with optional arguments).\n\n"); + DISPLAYOUT(" --train-legacy[=s=#] Use the legacy algorithm with selectivity #. [Default: %u]\n", g_defaultSelectivityLevel); + DISPLAYOUT(" -o NAME Use NAME as dictionary name. [Default: %s]\n", g_defaultDictName); + DISPLAYOUT(" --maxdict=# Limit dictionary to specified size #. [Default: %u]\n", g_defaultMaxDictSize); + DISPLAYOUT(" --dictID=# Force dictionary ID to #. [Default: Random]\n"); #endif #ifndef ZSTD_NOBENCH - DISPLAYOUT( "\n"); - DISPLAYOUT( "Benchmark arguments : \n"); - DISPLAYOUT( " -b# : benchmark file(s), using # compression level (default: %d) \n", ZSTDCLI_CLEVEL_DEFAULT); - DISPLAYOUT( " -e# : test all compression levels successively from -b# to -e# (default: 1) \n"); - DISPLAYOUT( " -i# : minimum evaluation time in seconds (default: 3s) \n"); - DISPLAYOUT( " -B# : cut file into independent blocks of size # (default: no block) \n"); - DISPLAYOUT( " -S : output one benchmark result per input file (default: consolidated result) \n"); - DISPLAYOUT( "--priority=rt : set process priority to real-time \n"); + DISPLAYOUT("\n"); + DISPLAYOUT("Benchmark options:\n"); + DISPLAYOUT(" -b# Perform benchmarking with compression level #. [Default: %d]\n", ZSTDCLI_CLEVEL_DEFAULT); + DISPLAYOUT(" -e# Test all compression levels up to #; starting level is `-b#`. [Default: 1]\n"); + DISPLAYOUT(" -i# Set the minimum evaluation to time # seconds. [Default: 3]\n"); + DISPLAYOUT(" -B# Cut file into independent chunks of size #. [Default: No chunking]\n"); + DISPLAYOUT(" -S Output one benchmark result per input file. [Default: Consolidated result]\n"); + DISPLAYOUT(" --priority=rt Set process priority to real-time.\n"); #endif } @@ -296,18 +338,9 @@ return name; } -/*! 
exeNameMatch() : - @return : a non-zero value if exeName matches test, excluding the extension - */ -static int exeNameMatch(const char* exeName, const char* test) -{ - return !strncmp(exeName, test, strlen(test)) && - (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.'); -} - static void errorOut(const char* msg) { - DISPLAY("%s \n", msg); exit(1); + DISPLAYLEVEL(1, "%s \n", msg); exit(1); } /*! readU32FromCharChecked() : @@ -634,6 +667,11 @@ #endif DISPLAYOUT("\n"); if (g_displayLevel >= 4) { + /* library versions */ + DISPLAYOUT("zlib version %s\n", FIO_zlibVersion()); + DISPLAYOUT("lz4 version %s\n", FIO_lz4Version()); + DISPLAYOUT("lzma version %s\n", FIO_lzmaVersion()); + /* posix support */ #ifdef _POSIX_C_SOURCE DISPLAYOUT("_POSIX_C_SOURCE defined: %ldL\n", (long) _POSIX_C_SOURCE); @@ -749,13 +787,13 @@ } else { \ argNb++; \ if (argNb >= argCount) { \ - DISPLAY("error: missing command argument \n"); \ + DISPLAYLEVEL(1, "error: missing command argument \n"); \ CLEAN_RETURN(1); \ } \ ptr = argv[argNb]; \ assert(ptr != NULL); \ if (ptr[0]=='-') { \ - DISPLAY("error: command cannot be separated from its argument by another command \n"); \ + DISPLAYLEVEL(1, "error: command cannot be separated from its argument by another command \n"); \ CLEAN_RETURN(1); \ } } } @@ -763,6 +801,18 @@ const char* __nb; \ NEXT_FIELD(__nb); \ val32 = readU32FromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ +} + +#define NEXT_TSIZE(valTsize) { \ + const char* __nb; \ + NEXT_FIELD(__nb); \ + valTsize = readSizeTFromChar(&__nb); \ + if(*__nb != 0) { \ + errorOut("error: only numeric values with optional suffixes K, KB, KiB, M, MB, MiB are allowed"); \ + } \ } typedef enum { zom_compress, zom_decompress, zom_test, zom_bench, zom_train, zom_list } zstd_operation_mode; @@ -788,9 +838,7 @@ hasStdout = 0, ldmFlag = 0, main_pause = 0, - nbWorkers = 0, adapt = 0, - useRowMatchFinder = 0, adaptMin = MINCLEVEL, adaptMax = MAXCLEVEL, rsyncable = 0, @@ -799,18 +847,22 @@ separateFiles = 0, setRealTimePrio = 0, singleThread = 0, -#ifdef ZSTD_MULTITHREAD defaultLogicalCores = 0, -#endif showDefaultCParams = 0, ultra=0, - contentSize=1; + contentSize=1, + removeSrcFile=0; + ZSTD_paramSwitch_e mmapDict=ZSTD_ps_auto; + ZSTD_paramSwitch_e useRowMatchFinder = ZSTD_ps_auto; + FIO_compressionType_t cType = FIO_zstdCompression; + unsigned nbWorkers = 0; double compressibility = 0.5; unsigned bench_nbSeconds = 3; /* would be better if this value was synchronized from bench */ size_t blockSize = 0; FIO_prefs_t* const prefs = FIO_createPreferences(); FIO_ctx_t* const fCtx = FIO_createContext(); + FIO_progressSetting_e progress = FIO_ps_auto; zstd_operation_mode operation = zom_compress; ZSTD_compressionParameters compressionParams; int cLevel = init_cLevel(); @@ -831,6 +883,7 @@ size_t streamSrcSize = 0; size_t targetCBlockSize = 0; size_t srcSizeHint = 0; + size_t nbInputFileNames = 0; int dictCLevel = g_defaultDictCLevel; unsigned dictSelect = g_defaultSelectivityLevel; #ifndef ZSTD_NODICT @@ -849,7 +902,7 @@ (void)recursive; (void)cLevelLast; /* not used when ZSTD_NOBENCH set */ (void)memLimit; assert(argCount >= 1); - if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAY("zstd: allocation error \n"); exit(1); } + if ((filenames==NULL) || (file_of_names==NULL)) { DISPLAYLEVEL(1, "zstd: allocation error \n"); exit(1); } programName = lastNameFromPath(programName); #ifdef ZSTD_MULTITHREAD nbWorkers = init_nbThreads(); @@ 
-858,17 +911,20 @@ /* preset behaviors */ if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbWorkers=0, singleThread=0; if (exeNameMatch(programName, ZSTD_UNZSTD)) operation=zom_decompress; - if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ - if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZ)) { suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like gzip */ - if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(prefs, 1); } /* behave like gunzip, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like lzma */ - if (exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lzmaCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unlzma, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like xz */ - if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_xzCompression); FIO_setRemoveSrcFile(prefs, 1); } /* behave like unxz, also supports multiple formats */ - if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like lz4 */ - if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; FIO_setCompressionType(prefs, FIO_lz4Compression); } /* behave like unlz4, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* supports multiple formats */ + if (exeNameMatch(programName, ZSTD_ZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like zcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + suffix = GZ_EXTENSION; cType = FIO_gzipCompression; removeSrcFile=1; + dictCLevel = cLevel = 6; /* gzip default is -6 */ + } + if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; removeSrcFile=1; } /* behave like gunzip, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; FIO_overwriteMode(prefs); forceStdout=1; followLinks=1; FIO_setPassThroughFlag(prefs, 1); outFileName=stdoutmark; g_displayLevel=1; } /* behave like gzcat, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; cType = FIO_lzmaCompression; removeSrcFile=1; } /* behave like lzma */ + if 
(exeNameMatch(programName, ZSTD_UNLZMA)) { operation=zom_decompress; cType = FIO_lzmaCompression; removeSrcFile=1; } /* behave like unlzma, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like xz */ + if (exeNameMatch(programName, ZSTD_UNXZ)) { operation=zom_decompress; cType = FIO_xzCompression; removeSrcFile=1; } /* behave like unxz, also supports multiple formats */ + if (exeNameMatch(programName, ZSTD_LZ4)) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; } /* behave like lz4 */ + if (exeNameMatch(programName, ZSTD_UNLZ4)) { operation=zom_decompress; cType = FIO_lz4Compression; } /* behave like unlz4, also supports multiple formats */ memset(&compressionParams, 0, sizeof(compressionParams)); /* init crash handler */ @@ -905,43 +961,57 @@ if (!strcmp(argument, "--help")) { usage_advanced(programName); CLEAN_RETURN(0); } if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; } if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; } - if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; g_displayLevel-=(g_displayLevel==2); continue; } + if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; removeSrcFile=0; continue; } if (!strcmp(argument, "--ultra")) { ultra=1; continue; } if (!strcmp(argument, "--check")) { FIO_setChecksumFlag(prefs, 2); continue; } if (!strcmp(argument, "--no-check")) { FIO_setChecksumFlag(prefs, 0); continue; } if (!strcmp(argument, "--sparse")) { FIO_setSparseWrite(prefs, 2); continue; } if (!strcmp(argument, "--no-sparse")) { FIO_setSparseWrite(prefs, 0); continue; } + if (!strcmp(argument, "--pass-through")) { FIO_setPassThroughFlag(prefs, 1); continue; } + if (!strcmp(argument, "--no-pass-through")) { FIO_setPassThroughFlag(prefs, 0); continue; } if (!strcmp(argument, "--test")) { operation=zom_test; continue; } + if (!strcmp(argument, "--asyncio")) { FIO_setAsyncIOFlag(prefs, 1); continue;} + if (!strcmp(argument, "--no-asyncio")) { FIO_setAsyncIOFlag(prefs, 0); continue;} if (!strcmp(argument, "--train")) { operation=zom_train; if (outFileName==NULL) outFileName=g_defaultDictName; continue; } if (!strcmp(argument, "--no-dictID")) { FIO_setDictIDFlag(prefs, 0); continue; } - if (!strcmp(argument, "--keep")) { FIO_setRemoveSrcFile(prefs, 0); continue; } - if (!strcmp(argument, "--rm")) { FIO_setRemoveSrcFile(prefs, 1); continue; } + if (!strcmp(argument, "--keep")) { removeSrcFile=0; continue; } + if (!strcmp(argument, "--rm")) { removeSrcFile=1; continue; } if (!strcmp(argument, "--priority=rt")) { setRealTimePrio = 1; continue; } if (!strcmp(argument, "--show-default-cparams")) { showDefaultCParams = 1; continue; } if (!strcmp(argument, "--content-size")) { contentSize = 1; continue; } if (!strcmp(argument, "--no-content-size")) { contentSize = 0; continue; } if (!strcmp(argument, "--adapt")) { adapt = 1; continue; } - if (!strcmp(argument, "--no-row-match-finder")) { useRowMatchFinder = 1; continue; } - if (!strcmp(argument, "--row-match-finder")) { useRowMatchFinder = 2; continue; } + if (!strcmp(argument, "--no-row-match-finder")) { useRowMatchFinder = ZSTD_ps_disable; continue; } + if (!strcmp(argument, "--row-match-finder")) { useRowMatchFinder = ZSTD_ps_enable; continue; } if (longCommandWArg(&argument, "--adapt=")) { adapt = 1; if (!parseAdaptParameters(argument, &adaptMin, &adaptMax)) { badusage(programName); CLEAN_RETURN(1); } continue; } if (!strcmp(argument, 
"--single-thread")) { nbWorkers = 0; singleThread = 1; continue; } - if (!strcmp(argument, "--format=zstd")) { suffix = ZSTD_EXTENSION; FIO_setCompressionType(prefs, FIO_zstdCompression); continue; } + if (!strcmp(argument, "--format=zstd")) { suffix = ZSTD_EXTENSION; cType = FIO_zstdCompression; continue; } + if (!strcmp(argument, "--mmap-dict")) { mmapDict = ZSTD_ps_enable; continue; } + if (!strcmp(argument, "--no-mmap-dict")) { mmapDict = ZSTD_ps_disable; continue; } #ifdef ZSTD_GZCOMPRESS - if (!strcmp(argument, "--format=gzip")) { suffix = GZ_EXTENSION; FIO_setCompressionType(prefs, FIO_gzipCompression); continue; } + if (!strcmp(argument, "--format=gzip")) { suffix = GZ_EXTENSION; cType = FIO_gzipCompression; continue; } + if (exeNameMatch(programName, ZSTD_GZ)) { /* behave like gzip */ + if (!strcmp(argument, "--best")) { dictCLevel = cLevel = 9; continue; } + if (!strcmp(argument, "--no-name")) { /* ignore for now */; continue; } + } #endif #ifdef ZSTD_LZMACOMPRESS - if (!strcmp(argument, "--format=lzma")) { suffix = LZMA_EXTENSION; FIO_setCompressionType(prefs, FIO_lzmaCompression); continue; } - if (!strcmp(argument, "--format=xz")) { suffix = XZ_EXTENSION; FIO_setCompressionType(prefs, FIO_xzCompression); continue; } + if (!strcmp(argument, "--format=lzma")) { suffix = LZMA_EXTENSION; cType = FIO_lzmaCompression; continue; } + if (!strcmp(argument, "--format=xz")) { suffix = XZ_EXTENSION; cType = FIO_xzCompression; continue; } #endif #ifdef ZSTD_LZ4COMPRESS - if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; FIO_setCompressionType(prefs, FIO_lz4Compression); continue; } + if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; cType = FIO_lz4Compression; continue; } #endif if (!strcmp(argument, "--rsyncable")) { rsyncable = 1; continue; } if (!strcmp(argument, "--compress-literals")) { literalCompressionMode = ZSTD_ps_enable; continue; } if (!strcmp(argument, "--no-compress-literals")) { literalCompressionMode = ZSTD_ps_disable; continue; } - if (!strcmp(argument, "--no-progress")) { FIO_setProgressSetting(FIO_ps_never); continue; } - if (!strcmp(argument, "--progress")) { FIO_setProgressSetting(FIO_ps_always); continue; } + if (!strcmp(argument, "--no-progress")) { progress = FIO_ps_never; continue; } + if (!strcmp(argument, "--progress")) { progress = FIO_ps_always; continue; } if (!strcmp(argument, "--exclude-compressed")) { FIO_setExcludeCompressedFile(prefs, 1); continue; } + if (!strcmp(argument, "--fake-stdin-is-console")) { UTIL_fakeStdinIsConsole(); continue; } + if (!strcmp(argument, "--fake-stdout-is-console")) { UTIL_fakeStdoutIsConsole(); continue; } + if (!strcmp(argument, "--fake-stderr-is-console")) { UTIL_fakeStderrIsConsole(); continue; } + if (!strcmp(argument, "--trace-file-stat")) { UTIL_traceFileStat(); continue; } /* long commands with arguments */ #ifndef ZSTD_NODICT @@ -983,15 +1053,21 @@ if (longCommandWArg(&argument, "--memlimit")) { NEXT_UINT32(memLimit); continue; } if (longCommandWArg(&argument, "--memory")) { NEXT_UINT32(memLimit); continue; } if (longCommandWArg(&argument, "--memlimit-decompress")) { NEXT_UINT32(memLimit); continue; } - if (longCommandWArg(&argument, "--block-size=")) { blockSize = readSizeTFromChar(&argument); continue; } + if (longCommandWArg(&argument, "--block-size")) { NEXT_TSIZE(blockSize); continue; } if (longCommandWArg(&argument, "--maxdict")) { NEXT_UINT32(maxDictSize); continue; } if (longCommandWArg(&argument, "--dictID")) { NEXT_UINT32(dictID); continue; } - if (longCommandWArg(&argument, 
"--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) { badusage(programName); CLEAN_RETURN(1); } continue; } - if (longCommandWArg(&argument, "--stream-size=")) { streamSrcSize = readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--target-compressed-block-size=")) { targetCBlockSize = readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--size-hint=")) { srcSizeHint = readSizeTFromChar(&argument); continue; } - if (longCommandWArg(&argument, "--output-dir-flat")) { NEXT_FIELD(outDirName); continue; } -#ifdef ZSTD_MULTITHREAD + if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) { badusage(programName); CLEAN_RETURN(1); } ; cType = FIO_zstdCompression; continue; } + if (longCommandWArg(&argument, "--stream-size")) { NEXT_TSIZE(streamSrcSize); continue; } + if (longCommandWArg(&argument, "--target-compressed-block-size")) { NEXT_TSIZE(targetCBlockSize); continue; } + if (longCommandWArg(&argument, "--size-hint")) { NEXT_TSIZE(srcSizeHint); continue; } + if (longCommandWArg(&argument, "--output-dir-flat")) { + NEXT_FIELD(outDirName); + if (strlen(outDirName) == 0) { + DISPLAYLEVEL(1, "error: output dir cannot be empty string (did you mean to pass '.' instead?)\n"); + CLEAN_RETURN(1); + } + continue; + } if (longCommandWArg(&argument, "--auto-threads")) { const char* threadDefault = NULL; NEXT_FIELD(threadDefault); @@ -999,9 +1075,15 @@ defaultLogicalCores = 1; continue; } -#endif #ifdef UTIL_HAS_MIRRORFILELIST - if (longCommandWArg(&argument, "--output-dir-mirror")) { NEXT_FIELD(outMirroredDirName); continue; } + if (longCommandWArg(&argument, "--output-dir-mirror")) { + NEXT_FIELD(outMirroredDirName); + if (strlen(outMirroredDirName) == 0) { + DISPLAYLEVEL(1, "error: output dir cannot be empty string (did you mean to pass '.' 
instead?)\n"); + CLEAN_RETURN(1); + } + continue; + } #endif #ifndef ZSTD_NOTRACE if (longCommandWArg(&argument, "--trace")) { char const* traceFile; NEXT_FIELD(traceFile); TRACE_enable(traceFile); continue; } @@ -1018,6 +1100,8 @@ /* Invalid character following --long */ badusage(programName); CLEAN_RETURN(1); + } else { + ldmWindowLog = g_defaultMaxWindowLog; } /* Only set windowLog if not already set by --zstd */ if (compressionParams.windowLog == 0) @@ -1075,8 +1159,8 @@ { /* Display help */ case 'V': printVersion(); CLEAN_RETURN(0); /* Version Only */ - case 'H': - case 'h': usage_advanced(programName); CLEAN_RETURN(0); + case 'H': usage_advanced(programName); CLEAN_RETURN(0); + case 'h': usage(stdout, programName); CLEAN_RETURN(0); /* Compress */ case 'z': operation=zom_compress; argument++; break; @@ -1090,7 +1174,10 @@ operation=zom_decompress; argument++; break; /* Force stdout, even if stdout==console */ - case 'c': forceStdout=1; outFileName=stdoutmark; argument++; break; + case 'c': forceStdout=1; outFileName=stdoutmark; removeSrcFile=0; argument++; break; + + /* do not store filename - gzip compatibility - nothing to do */ + case 'n': argument++; break; /* Use file content as dictionary */ case 'D': argument++; NEXT_FIELD(dictFileName); break; @@ -1105,7 +1192,7 @@ case 'q': g_displayLevel--; argument++; break; /* keep source file (default) */ - case 'k': FIO_setRemoveSrcFile(prefs, 0); argument++; break; + case 'k': removeSrcFile=0; argument++; break; /* Checksum */ case 'C': FIO_setChecksumFlag(prefs, 2); argument++; break; @@ -1164,7 +1251,7 @@ /* nb of threads (hidden option) */ case 'T': argument++; - nbWorkers = (int)readU32FromChar(&argument); + nbWorkers = readU32FromChar(&argument); break; /* Dictionary Selection level */ @@ -1204,18 +1291,21 @@ DISPLAYLEVEL(3, WELCOME_MESSAGE); #ifdef ZSTD_MULTITHREAD + if ((operation==zom_decompress) && (!singleThread) && (nbWorkers > 1)) { + DISPLAYLEVEL(2, "Warning : decompression does not support multi-threading\n"); + } if ((nbWorkers==0) && (!singleThread)) { /* automatically set # workers based on # of reported cpus */ if (defaultLogicalCores) { - nbWorkers = UTIL_countLogicalCores(); + nbWorkers = (unsigned)UTIL_countLogicalCores(); DISPLAYLEVEL(3, "Note: %d logical core(s) detected \n", nbWorkers); } else { - nbWorkers = UTIL_countPhysicalCores(); + nbWorkers = (unsigned)UTIL_countPhysicalCores(); DISPLAYLEVEL(3, "Note: %d physical core(s) detected \n", nbWorkers); } } #else - (void)singleThread; (void)nbWorkers; + (void)singleThread; (void)nbWorkers; (void)defaultLogicalCores; #endif g_utilDisplayLevel = g_displayLevel; @@ -1251,6 +1341,8 @@ } } + nbInputFileNames = filenames->tableSize; /* saving number of input files */ + if (recursive) { /* at this stage, filenameTable is a list of paths, which can contain both files and directories */ UTIL_expandFNT(&filenames, followLinks); } @@ -1263,7 +1355,7 @@ int const ret = FIO_listMultipleFiles((unsigned)filenames->tableSize, filenames->fileNames, g_displayLevel); CLEAN_RETURN(ret); #else - DISPLAY("file information is not supported \n"); + DISPLAYLEVEL(1, "file information is not supported \n"); CLEAN_RETURN(1); #endif } @@ -1271,14 +1363,18 @@ /* Check if benchmark is selected */ if (operation==zom_bench) { #ifndef ZSTD_NOBENCH + if (cType != FIO_zstdCompression) { + DISPLAYLEVEL(1, "benchmark mode is only compatible with zstd format \n"); + CLEAN_RETURN(1); + } benchParams.blockSize = blockSize; - benchParams.nbWorkers = nbWorkers; + benchParams.nbWorkers = (int)nbWorkers; 
benchParams.realTime = (unsigned)setRealTimePrio; benchParams.nbSeconds = bench_nbSeconds; benchParams.ldmFlag = ldmFlag; benchParams.ldmMinMatch = (int)g_ldmMinMatch; benchParams.ldmHashLog = (int)g_ldmHashLog; - benchParams.useRowMatchFinder = useRowMatchFinder; + benchParams.useRowMatchFinder = (int)useRowMatchFinder; if (g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) { benchParams.ldmBucketSizeLog = (int)g_ldmBucketSizeLog; } @@ -1299,15 +1395,15 @@ int c; DISPLAYLEVEL(3, "Benchmarking %s \n", filenames->fileNames[i]); for(c = cLevel; c <= cLevelLast; c++) { - BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams); + operationResult = BMK_benchFilesAdvanced(&filenames->fileNames[i], 1, dictFileName, c, &compressionParams, g_displayLevel, &benchParams); } } } else { for(; cLevel <= cLevelLast; cLevel++) { - BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, &compressionParams, g_displayLevel, &benchParams); + operationResult = BMK_benchFilesAdvanced(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, &compressionParams, g_displayLevel, &benchParams); } } } else { for(; cLevel <= cLevelLast; cLevel++) { - BMK_syntheticTest(cLevel, compressibility, &compressionParams, g_displayLevel, &benchParams); + operationResult = BMK_syntheticTest(cLevel, compressibility, &compressionParams, g_displayLevel, &benchParams); } } #else @@ -1349,24 +1445,34 @@ } #ifndef ZSTD_NODECOMPRESS - if (operation==zom_test) { FIO_setTestMode(prefs, 1); outFileName=nulmark; FIO_setRemoveSrcFile(prefs, 0); } /* test mode */ + if (operation==zom_test) { FIO_setTestMode(prefs, 1); outFileName=nulmark; removeSrcFile=0; } /* test mode */ #endif /* No input filename ==> use stdin and stdout */ - if (filenames->tableSize == 0) UTIL_refFilename(filenames, stdinmark); - if (!strcmp(filenames->fileNames[0], stdinmark) && !outFileName) + if (filenames->tableSize == 0) { + /* It is possible that the input + was a number of empty directories. 
In this case + stdin and stdout should not be used */ + if (nbInputFileNames > 0 ){ + DISPLAYLEVEL(1, "please provide correct input file(s) or non-empty directories -- ignored \n"); + CLEAN_RETURN(0); + } + UTIL_refFilename(filenames, stdinmark); + } + + if (filenames->tableSize == 1 && !strcmp(filenames->fileNames[0], stdinmark) && !outFileName) outFileName = stdoutmark; /* when input is stdin, default output is stdout */ /* Check if input/output defined as console; trigger an error in this case */ if (!forceStdin - && !strcmp(filenames->fileNames[0], stdinmark) - && IS_CONSOLE(stdin) ) { + && (UTIL_searchFileNamesTable(filenames, stdinmark) != -1) + && UTIL_isConsole(stdin) ) { DISPLAYLEVEL(1, "stdin is a console, aborting\n"); CLEAN_RETURN(1); } - if ( outFileName && !strcmp(outFileName, stdoutmark) - && IS_CONSOLE(stdout) - && !strcmp(filenames->fileNames[0], stdinmark) + if ( (!outFileName || !strcmp(outFileName, stdoutmark)) + && UTIL_isConsole(stdout) + && (UTIL_searchFileNamesTable(filenames, stdinmark) != -1) && !forceStdout && operation!=zom_decompress ) { DISPLAYLEVEL(1, "stdout is a console, aborting\n"); @@ -1384,25 +1490,35 @@ if (showDefaultCParams) { if (operation == zom_decompress) { - DISPLAY("error : can't use --show-default-cparams in decomrpession mode \n"); + DISPLAYLEVEL(1, "error : can't use --show-default-cparams in decompression mode \n"); CLEAN_RETURN(1); } } if (dictFileName != NULL && patchFromDictFileName != NULL) { - DISPLAY("error : can't use -D and --patch-from=# at the same time \n"); + DISPLAYLEVEL(1, "error : can't use -D and --patch-from=# at the same time \n"); CLEAN_RETURN(1); } if (patchFromDictFileName != NULL && filenames->tableSize > 1) { - DISPLAY("error : can't use --patch-from=# on multiple files \n"); + DISPLAYLEVEL(1, "error : can't use --patch-from=# on multiple files \n"); CLEAN_RETURN(1); } - /* No status message in pipe mode (stdin - stdout) */ + /* No status message by default when output is stdout */ hasStdout = outFileName && !strcmp(outFileName,stdoutmark); + if (hasStdout && (g_displayLevel==2)) g_displayLevel=1; + + /* when stderr is not the console, do not pollute it with progress updates (unless requested) */ + if (!UTIL_isConsole(stderr) && (progress!=FIO_ps_always)) progress=FIO_ps_never; + FIO_setProgressSetting(progress); - if ((hasStdout || !IS_CONSOLE(stderr)) && (g_displayLevel==2)) g_displayLevel=1; + /* don't remove source files when output is stdout */; + if (hasStdout && removeSrcFile) { + DISPLAYLEVEL(3, "Note: src files are not removed when output is stdout \n"); + removeSrcFile = 0; + } + FIO_setRemoveSrcFile(prefs, removeSrcFile); /* IO Stream/File */ FIO_setHasStdoutOutput(fCtx, hasStdout); @@ -1411,6 +1527,7 @@ FIO_setNotificationLevel(g_displayLevel); FIO_setAllowBlockDevices(prefs, allowBlockDevices); FIO_setPatchFromMode(prefs, patchFromDictFileName != NULL); + FIO_setMMapDict(prefs, mmapDict); if (memLimit == 0) { if (compressionParams.windowLog == 0) { memLimit = (U32)1 << g_defaultMaxWindowLog; @@ -1422,8 +1539,9 @@ FIO_setMemLimit(prefs, memLimit); if (operation==zom_compress) { #ifndef ZSTD_NOCOMPRESS + FIO_setCompressionType(prefs, cType); FIO_setContentSize(prefs, contentSize); - FIO_setNbWorkers(prefs, nbWorkers); + FIO_setNbWorkers(prefs, (int)nbWorkers); FIO_setBlockSize(prefs, (int)blockSize); if (g_overlapLog!=OVERLAP_LOG_DEFAULT) FIO_setOverlapLog(prefs, (int)g_overlapLog); FIO_setLdmFlag(prefs, (unsigned)ldmFlag); @@ -1431,8 +1549,8 @@ FIO_setLdmMinMatch(prefs, (int)g_ldmMinMatch); if 
(g_ldmBucketSizeLog != LDM_PARAM_DEFAULT) FIO_setLdmBucketSizeLog(prefs, (int)g_ldmBucketSizeLog); if (g_ldmHashRateLog != LDM_PARAM_DEFAULT) FIO_setLdmHashRateLog(prefs, (int)g_ldmHashRateLog); - FIO_setAdaptiveMode(prefs, (unsigned)adapt); - FIO_setUseRowMatchFinder(prefs, useRowMatchFinder); + FIO_setAdaptiveMode(prefs, adapt); + FIO_setUseRowMatchFinder(prefs, (int)useRowMatchFinder); FIO_setAdaptMin(prefs, adaptMin); FIO_setAdaptMax(prefs, adaptMax); FIO_setRsyncable(prefs, rsyncable); @@ -1440,6 +1558,7 @@ FIO_setTargetCBlockSize(prefs, targetCBlockSize); FIO_setSrcSizeHint(prefs, srcSizeHint); FIO_setLiteralCompressionMode(prefs, literalCompressionMode); + FIO_setSparseWrite(prefs, 0); if (adaptMin > cLevel) cLevel = adaptMin; if (adaptMax < cLevel) cLevel = adaptMax; @@ -1465,8 +1584,12 @@ else operationResult = FIO_compressMultipleFilenames(fCtx, prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, suffix, dictFileName, cLevel, compressionParams); #else - (void)contentSize; (void)suffix; (void)adapt; (void)rsyncable; (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; (void)ZSTD_strategyMap; (void)useRowMatchFinder; /* not used when ZSTD_NOCOMPRESS set */ - DISPLAY("Compression not supported \n"); + /* these variables are only used when compression mode is enabled */ + (void)contentSize; (void)suffix; (void)adapt; (void)rsyncable; + (void)ultra; (void)cLevel; (void)ldmFlag; (void)literalCompressionMode; + (void)targetCBlockSize; (void)streamSrcSize; (void)srcSizeHint; + (void)ZSTD_strategyMap; (void)useRowMatchFinder; (void)cType; + DISPLAYLEVEL(1, "Compression not supported \n"); #endif } else { /* decompression or test */ #ifndef ZSTD_NODECOMPRESS @@ -1476,7 +1599,7 @@ operationResult = FIO_decompressMultipleFilenames(fCtx, prefs, filenames->fileNames, outMirroredDirName, outDirName, outFileName, dictFileName); } #else - DISPLAY("Decompression not supported \n"); + DISPLAYLEVEL(1, "Decompression not supported \n"); #endif } diff --git a/sys/contrib/zstd/programs/zstdcli_trace.h b/sys/contrib/zstd/programs/zstdcli_trace.h --- a/sys/contrib/zstd/programs/zstdcli_trace.h +++ b/sys/contrib/zstd/programs/zstdcli_trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/programs/zstdcli_trace.c b/sys/contrib/zstd/programs/zstdcli_trace.c --- a/sys/contrib/zstd/programs/zstdcli_trace.c +++ b/sys/contrib/zstd/programs/zstdcli_trace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the diff --git a/sys/contrib/zstd/programs/zstdgrep.1 b/sys/contrib/zstd/programs/zstdgrep.1 --- a/sys/contrib/zstd/programs/zstdgrep.1 +++ b/sys/contrib/zstd/programs/zstdgrep.1 @@ -1,26 +1,17 @@ -. -.TH "ZSTDGREP" "1" "January 2022" "zstd 1.5.2" "User Commands" -. +.TH "ZSTDGREP" "1" "March 2023" "zstd 1.5.5" "User Commands" .SH "NAME" \fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \.\.\.] -. +\fBzstdgrep\fR [\fIgrep\-flags\fR] [\-\-] \fIpattern\fR [\fIfiles\fR \|\.\|\.\|\.] 
.SH "DESCRIPTION" -\fBzstdgrep\fR runs \fBgrep (1)\fR on files, or \fBstdin\fR if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. +\fBzstdgrep\fR runs \fBgrep\fR(1) on files, or \fBstdin\fR if no files argument is given, after decompressing them with \fBzstdcat\fR(1)\. .P -The grep\-flags and pattern arguments are passed on to \fBgrep (1)\fR\. If an \fB\-e\fR flag is found in the \fBgrep\-flags\fR, \fBzstdgrep\fR will not look for a pattern argument\. -. +The \fIgrep\-flags\fR and \fIpattern\fR arguments are passed on to \fBgrep\fR(1)\. If an \fB\-e\fR flag is found in the \fIgrep\-flags\fR, \fBzstdgrep\fR will not look for a \fIpattern\fR argument\. .P -Note that modern \fBgrep\fR alternatives such as \fBripgrep\fR (\fBrg\fR) support \fBzstd\fR\-compressed files out of the box, and can prove better alternatives than \fBzstdgrep\fR notably for unsupported complex pattern searches\. Note though that such alternatives may also feature some minor command line differences\. -. +Note that modern \fBgrep\fR alternatives such as \fBripgrep\fR (\fBrg\fR(1)) support \fBzstd\fR\-compressed files out of the box, and can prove better alternatives than \fBzstdgrep\fR notably for unsupported complex pattern searches\. Note though that such alternatives may also feature some minor command line differences\. .SH "EXIT STATUS" In case of missing arguments or missing pattern, 1 will be returned, otherwise 0\. -. .SH "SEE ALSO" -\fBzstd (1)\fR -. +\fBzstd\fR(1) .SH "AUTHORS" Thomas Klausner \fIwiz@NetBSD\.org\fR diff --git a/sys/contrib/zstd/programs/zstdgrep.1.md b/sys/contrib/zstd/programs/zstdgrep.1.md --- a/sys/contrib/zstd/programs/zstdgrep.1.md +++ b/sys/contrib/zstd/programs/zstdgrep.1.md @@ -4,16 +4,16 @@ SYNOPSIS -------- -`zstdgrep` [*grep-flags*] [--] _pattern_ [_files_ ...] +`zstdgrep` [] [--] [ ...] DESCRIPTION ----------- -`zstdgrep` runs `grep (1)` on files, or `stdin` if no files argument is given, after decompressing them with `zstdcat (1)`. +`zstdgrep` runs `grep`(1) on files, or `stdin` if no files argument is given, after decompressing them with `zstdcat`(1). -The grep-flags and pattern arguments are passed on to `grep (1)`. If an `-e` flag is found in the `grep-flags`, `zstdgrep` will not look for a pattern argument. +The and arguments are passed on to `grep`(1). If an `-e` flag is found in the , `zstdgrep` will not look for a argument. -Note that modern `grep` alternatives such as `ripgrep` (`rg`) support `zstd`-compressed files out of the box, +Note that modern `grep` alternatives such as `ripgrep` (`rg`(1)) support `zstd`-compressed files out of the box, and can prove better alternatives than `zstdgrep` notably for unsupported complex pattern searches. Note though that such alternatives may also feature some minor command line differences. @@ -23,7 +23,7 @@ SEE ALSO -------- -`zstd (1)` +`zstd`(1) AUTHORS ------- diff --git a/sys/contrib/zstd/programs/zstdless b/sys/contrib/zstd/programs/zstdless --- a/sys/contrib/zstd/programs/zstdless +++ b/sys/contrib/zstd/programs/zstdless @@ -1,2 +1,8 @@ #!/bin/sh -zstdcat "$@" | less + +zstd=${ZSTD:-zstd} + +# TODO: Address quirks and bugs tied to old versions of less, provide a mechanism to pass flags directly to zstd + +export LESSOPEN="|-${zstd} -cdfq %s" +exec less "$@" diff --git a/sys/contrib/zstd/programs/zstdless.1 b/sys/contrib/zstd/programs/zstdless.1 --- a/sys/contrib/zstd/programs/zstdless.1 +++ b/sys/contrib/zstd/programs/zstdless.1 @@ -1,14 +1,9 @@ -. 
-.TH "ZSTDLESS" "1" "January 2022" "zstd 1.5.2" "User Commands" -. +.TH "ZSTDLESS" "1" "March 2023" "zstd 1.5.5" "User Commands" .SH "NAME" \fBzstdless\fR \- view zstandard\-compressed files -. .SH "SYNOPSIS" -\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \.\.\.] -. +\fBzstdless\fR [\fIflags\fR] [\fIfile\fR \|\.\|\.\|\.] .SH "DESCRIPTION" -\fBzstdless\fR runs \fBless (1)\fR on files or stdin, if no files argument is given, after decompressing them with \fBzstdcat (1)\fR\. -. +\fBzstdless\fR runs \fBless\fR(1) on files or stdin, if no \fIfile\fR argument is given, after decompressing them with \fBzstdcat\fR(1)\. .SH "SEE ALSO" -\fBzstd (1)\fR +\fBzstd\fR(1) diff --git a/sys/contrib/zstd/programs/zstdless.1.md b/sys/contrib/zstd/programs/zstdless.1.md --- a/sys/contrib/zstd/programs/zstdless.1.md +++ b/sys/contrib/zstd/programs/zstdless.1.md @@ -4,13 +4,13 @@ SYNOPSIS -------- -`zstdless` [*flags*] [_file_ ...] +`zstdless` [] [ ...] DESCRIPTION ----------- -`zstdless` runs `less (1)` on files or stdin, if no files argument is given, after decompressing them with `zstdcat (1)`. +`zstdless` runs `less`(1) on files or stdin, if no argument is given, after decompressing them with `zstdcat`(1). SEE ALSO -------- -`zstd (1)` +`zstd`(1) diff --git a/sys/contrib/zstd/zlibWrapper/Makefile b/sys/contrib/zstd/zlibWrapper/Makefile --- a/sys/contrib/zstd/zlibWrapper/Makefile +++ b/sys/contrib/zstd/zlibWrapper/Makefile @@ -66,9 +66,10 @@ ./zwrapbench -qi1b3B1K $(TEST_FILE) ./zwrapbench -rqi1b1e3 ../lib -#valgrindTest: ZSTDLIBRARY = $(ZSTDLIBDIR)/libzstd.so -valgrindTest: VALGRIND = LD_LIBRARY_PATH=$(ZSTDLIBDIR) valgrind --track-origins=yes --leak-check=full --error-exitcode=1 -valgrindTest: clean example fitblk example_zstd fitblk_zstd zwrapbench +.PHONY: test-valgrind +#test-valgrind: ZSTDLIBRARY = $(ZSTDLIBDIR)/libzstd.so +test-valgrind: VALGRIND = LD_LIBRARY_PATH=$(ZSTDLIBDIR) valgrind --track-origins=yes --leak-check=full --error-exitcode=1 +test-valgrind: clean example fitblk example_zstd fitblk_zstd zwrapbench @echo "\n ---- valgrind tests ----" $(VALGRIND) ./example $(VALGRIND) ./example_zstd diff --git a/sys/contrib/zstd/zlibWrapper/README.md b/sys/contrib/zstd/zlibWrapper/README.md --- a/sys/contrib/zstd/zlibWrapper/README.md +++ b/sys/contrib/zstd/zlibWrapper/README.md @@ -1,7 +1,7 @@ Zstandard wrapper for zlib ================================ -The main objective of creating a zstd wrapper for [zlib](http://zlib.net/) is to allow a quick and smooth transition to zstd for projects already using zlib. +The main objective of creating a zstd wrapper for [zlib](https://zlib.net/) is to allow a quick and smooth transition to zstd for projects already using zlib. #### Required files @@ -43,7 +43,7 @@ #### Example -We have take the file `test/example.c` from [the zlib library distribution](http://zlib.net/) and copied it to [zlibWrapper/examples/example.c](examples/example.c). +We have taken the file `test/example.c` from [the zlib library distribution](https://zlib.net/) and copied it to [zlibWrapper/examples/example.c](examples/example.c). 
After compilation and execution it shows the following results: ``` zlib version 1.2.8 = 0x1280, compile flags = 0x65 diff --git a/sys/contrib/zstd/zlibWrapper/examples/example.c b/sys/contrib/zstd/zlibWrapper/examples/example.c --- a/sys/contrib/zstd/zlibWrapper/examples/example.c +++ b/sys/contrib/zstd/zlibWrapper/examples/example.c @@ -56,26 +56,26 @@ const char dictionary[] = "hello, hello!"; uLong dictId; /* Adler32 value of the dictionary */ -void test_deflate OF((Byte *compr, uLong comprLen)); -void test_inflate OF((Byte *compr, uLong comprLen, +void test_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_deflate OF((Byte *compr, uLong comprLen, +void test_large_deflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_inflate OF((Byte *compr, uLong comprLen, +void test_large_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_flush OF((Byte *compr, uLong *comprLen)); -void test_sync OF((Byte *compr, uLong comprLen, +void test_flush _Z_OF((Byte *compr, uLong *comprLen)); +void test_sync _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_dict_deflate OF((Byte *compr, uLong comprLen)); -void test_dict_inflate OF((Byte *compr, uLong comprLen, +void test_dict_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_dict_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -int main OF((int argc, char *argv[])); +int main _Z_OF((int argc, char *argv[])); #ifdef Z_SOLO -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -102,9 +102,9 @@ static alloc_func zalloc = (alloc_func)0; static free_func zfree = (free_func)0; -void test_compress OF((Byte *compr, uLong comprLen, +void test_compress _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_gzio OF((const char *fname, +void test_gzio _Z_OF((const char *fname, Byte *uncompr, uLong uncomprLen)); /* =========================================================================== diff --git a/sys/contrib/zstd/zlibWrapper/examples/example_original.c b/sys/contrib/zstd/zlibWrapper/examples/example_original.c --- a/sys/contrib/zstd/zlibWrapper/examples/example_original.c +++ b/sys/contrib/zstd/zlibWrapper/examples/example_original.c @@ -51,26 +51,26 @@ const char dictionary[] = "hello"; uLong dictId; /* Adler32 value of the dictionary */ -void test_deflate OF((Byte *compr, uLong comprLen)); -void test_inflate OF((Byte *compr, uLong comprLen, +void test_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_deflate OF((Byte *compr, uLong comprLen, +void test_large_deflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_large_inflate OF((Byte *compr, uLong comprLen, +void test_large_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_flush OF((Byte *compr, uLong *comprLen)); -void test_sync OF((Byte *compr, uLong comprLen, +void test_flush _Z_OF((Byte *compr, uLong *comprLen)); +void test_sync _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_dict_deflate OF((Byte *compr, uLong comprLen)); -void test_dict_inflate OF((Byte *compr, uLong 
comprLen, +void test_dict_deflate _Z_OF((Byte *compr, uLong comprLen)); +void test_dict_inflate _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -int main OF((int argc, char *argv[])); +int main _Z_OF((int argc, char *argv[])); #ifdef Z_SOLO -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -94,9 +94,9 @@ static alloc_func zalloc = (alloc_func)0; static free_func zfree = (free_func)0; -void test_compress OF((Byte *compr, uLong comprLen, +void test_compress _Z_OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); -void test_gzio OF((const char *fname, +void test_gzio _Z_OF((const char *fname, Byte *uncompr, uLong uncomprLen)); /* =========================================================================== diff --git a/sys/contrib/zstd/zlibWrapper/examples/minigzip.c b/sys/contrib/zstd/zlibWrapper/examples/minigzip.c --- a/sys/contrib/zstd/zlibWrapper/examples/minigzip.c +++ b/sys/contrib/zstd/zlibWrapper/examples/minigzip.c @@ -3,7 +3,7 @@ /* minigzip.c -- simulate gzip using the zlib compression library * Copyright (C) 1995-2006, 2010, 2011 Jean-loup Gailly. - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ /* @@ -64,7 +64,7 @@ #if !defined(Z_HAVE_UNISTD_H) && !defined(_LARGEFILE64_SOURCE) #ifndef WIN32 /* unlink already in stdio.h for WIN32 */ - extern int unlink OF((const char *)); + extern int unlink _Z_OF((const char *)); #endif #endif @@ -154,8 +154,8 @@ # include /* for unlink() */ #endif -void *myalloc OF((void *, unsigned, unsigned)); -void myfree OF((void *, void *)); +void *myalloc _Z_OF((void *, unsigned, unsigned)); +void myfree _Z_OF((void *, void *)); void *myalloc(q, n, m) void *q; @@ -180,9 +180,9 @@ z_stream strm; } *gzFile; -gzFile gzopen OF((const char *, const char *)); -gzFile gzdopen OF((int, const char *)); -gzFile gz_open OF((const char *, int, const char *)); +gzFile gzopen _Z_OF((const char *, const char *)); +gzFile gzdopen _Z_OF((int, const char *)); +gzFile gz_open _Z_OF((const char *, int, const char *)); gzFile gzopen(path, mode) const char *path; @@ -236,7 +236,7 @@ return gz; } -int gzwrite OF((gzFile, const void *, unsigned)); +int gzwrite _Z_OF((gzFile, const void *, unsigned)); int gzwrite(gz, buf, len) gzFile gz; @@ -260,7 +260,7 @@ return len; } -int gzread OF((gzFile, void *, unsigned)); +int gzread _Z_OF((gzFile, void *, unsigned)); int gzread(gz, buf, len) gzFile gz; @@ -297,7 +297,7 @@ return len - strm->avail_out; } -int gzclose OF((gzFile)); +int gzclose _Z_OF((gzFile)); int gzclose(gz) gzFile gz; @@ -326,7 +326,7 @@ return Z_OK; } -const char *gzerror OF((gzFile, int *)); +const char *gzerror _Z_OF((gzFile, int *)); const char *gzerror(gz, err) gzFile gz; @@ -340,15 +340,15 @@ char *prog; -void error OF((const char *msg)); -void gz_compress OF((FILE *in, gzFile out)); +void error _Z_OF((const char *msg)); +void gz_compress _Z_OF((FILE *in, gzFile out)); #ifdef USE_MMAP -int gz_compress_mmap OF((FILE *in, gzFile out)); +int gz_compress_mmap _Z_OF((FILE *in, gzFile out)); #endif -void gz_uncompress OF((gzFile in, FILE *out)); -void file_compress OF((char *file, char *mode)); -void file_uncompress OF((char *file)); -int main OF((int argc, char *argv[])); +void gz_uncompress _Z_OF((gzFile in, FILE *out)); +void file_compress 
_Z_OF((char *file, char *mode)); +void file_uncompress _Z_OF((char *file)); +int main _Z_OF((int argc, char *argv[])); /* =========================================================================== * Display error message and exit diff --git a/sys/contrib/zstd/zlibWrapper/examples/zwrapbench.c b/sys/contrib/zstd/zlibWrapper/examples/zwrapbench.c --- a/sys/contrib/zstd/zlibWrapper/examples/zwrapbench.c +++ b/sys/contrib/zstd/zlibWrapper/examples/zwrapbench.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -26,7 +26,7 @@ #include "datagen.h" /* RDG_genBuffer */ #include "xxhash.h" -#include "zstd_zlibwrapper.h" +#include "../zstd_zlibwrapper.h" @@ -109,17 +109,17 @@ static size_t g_blockSize = 0; int g_additionalParam = 0; -void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; } +static void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; } -void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; } +static void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; } -void BMK_SetNbIterations(unsigned nbLoops) +static void BMK_SetNbIterations(unsigned nbLoops) { g_nbIterations = nbLoops; DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbIterations); } -void BMK_SetBlockSize(size_t blockSize) +static void BMK_SetBlockSize(size_t blockSize) { g_blockSize = blockSize; DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10)); @@ -388,7 +388,7 @@ markNb = (markNb+1) % NB_MARKS; DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio, - (double)srcSize / fastestC ); + (double)srcSize / (double)fastestC ); (void)fastestD; (void)crcOrig; /* unused when decompression disabled */ #if 1 @@ -428,8 +428,10 @@ ZSTD_DStream* zbd = ZSTD_createDStream(); size_t rSize; if (zbd == NULL) EXM_THROW(1, "ZSTD_createDStream() allocation failure"); - rSize = ZSTD_initDStream_usingDict(zbd, dictBuffer, dictBufferSize); - if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_initDStream() failed : %s", ZSTD_getErrorName(rSize)); + rSize = ZSTD_DCtx_reset(zbd, ZSTD_reset_session_only); + if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_reset() failed : %s", ZSTD_getErrorName(rSize)); + rSize = ZSTD_DCtx_loadDictionary(zbd, dictBuffer, dictBufferSize); + if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_loadDictionary() failed : %s", ZSTD_getErrorName(rSize)); do { U32 blockNb; for (blockNb=0; blockNb%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r", marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio, - (double)srcSize / fastestC, - (double)srcSize / fastestD ); + (double)srcSize / (double)fastestC, + (double)srcSize / (double)fastestD ); /* CRC Checking */ { U64 const crcCheck = XXH64(resultBuffer, srcSize, 0); @@ -558,8 +560,8 @@ } /* for (testNb = 1; testNb <= (g_nbIterations + !g_nbIterations); testNb++) */ if (g_displayLevel == 1) { - double cSpeed = (double)srcSize / fastestC; - double dSpeed = (double)srcSize / fastestD; + double cSpeed = (double)srcSize / (double)fastestC; + double dSpeed = (double)srcSize / (double)fastestD; if (g_additionalParam) DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam); 
else @@ -796,7 +798,7 @@ } -int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles, +static int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles, const char* dictFileName, int cLevel, int cLevelLast) { double const compressibility = (double)g_compressibilityDefault / 100; @@ -836,7 +838,7 @@ DISPLAY( " -b# : benchmark file(s), using # compression level (default : %d) \n", ZSTDCLI_CLEVEL_DEFAULT); DISPLAY( " -e# : test all compression levels from -bX to # (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT); DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s)\n"); - DISPLAY( " -B# : cut file into independent blocks of size # (default: no block)\n"); + DISPLAY( " -B# : cut file into independent chunks of size # (default: no chunking)\n"); return 0; } diff --git a/sys/contrib/zstd/zlibWrapper/gzclose.c b/sys/contrib/zstd/zlibWrapper/gzclose.c --- a/sys/contrib/zstd/zlibWrapper/gzclose.c +++ b/sys/contrib/zstd/zlibWrapper/gzclose.c @@ -3,7 +3,7 @@ /* gzclose.c -- zlib gzclose() function * Copyright (C) 2004, 2010 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" diff --git a/sys/contrib/zstd/zlibWrapper/gzcompatibility.h b/sys/contrib/zstd/zlibWrapper/gzcompatibility.h --- a/sys/contrib/zstd/zlibWrapper/gzcompatibility.h +++ b/sys/contrib/zstd/zlibWrapper/gzcompatibility.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -11,10 +11,10 @@ #if ZLIB_VERNUM <= 0x1240 -ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); -ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); -ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); -ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_r _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzbuffer _Z_OF((gzFile file, unsigned size)); +ZEXTERN z_off_t ZEXPORT gzoffset _Z_OF((gzFile file)); #if !defined(_WIN32) && defined(Z_LARGE64) # define z_off64_t off64_t @@ -40,7 +40,7 @@ #if ZLIB_VERNUM <= 0x1270 #if defined(_WIN32) && !defined(Z_SOLO) # include /* for wchar_t */ -ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, +ZEXTERN gzFile ZEXPORT gzopen_w _Z_OF((const wchar_t *path, const char *mode)); #endif #endif @@ -61,8 +61,8 @@ # endif # undef z_longlong #endif -ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems, +ZEXTERN z_size_t ZEXPORT gzfread _Z_OF((voidp buf, z_size_t size, z_size_t nitems, gzFile file)); -ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size, +ZEXTERN z_size_t ZEXPORT gzfwrite _Z_OF((voidpc buf, z_size_t size, z_size_t nitems, gzFile file)); #endif diff --git a/sys/contrib/zstd/zlibWrapper/gzguts.h b/sys/contrib/zstd/zlibWrapper/gzguts.h --- a/sys/contrib/zstd/zlibWrapper/gzguts.h +++ b/sys/contrib/zstd/zlibWrapper/gzguts.h @@ -4,7 +4,7 @@ /* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #ifdef _LARGEFILE64_SOURCE @@ -126,8 +126,8 @@ /* gz* functions always use library 
allocation functions */ #ifndef STDC - extern voidp malloc OF((uInt size)); - extern void free OF((voidpf ptr)); + extern voidp malloc _Z_OF((uInt size)); + extern void free _Z_OF((voidpf ptr)); #endif /* get errno and strerror definition */ @@ -145,10 +145,10 @@ /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 - ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); - ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); - ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); - ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN gzFile ZEXPORT gzopen64 _Z_OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 _Z_OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 _Z_OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 _Z_OF((gzFile)); #endif /* default memLevel */ @@ -213,9 +213,9 @@ } gz_statep; /* shared functions */ -void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +void ZLIB_INTERNAL gz_error _Z_OF((gz_statep, int, const char *)); #if defined UNDER_CE -char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); +char ZLIB_INTERNAL *gz_strwinerror _Z_OF((DWORD error)); #endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t @@ -224,6 +224,6 @@ #ifdef INT_MAX # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) #else -unsigned ZLIB_INTERNAL gz_intmax OF((void)); +unsigned ZLIB_INTERNAL gz_intmax _Z_OF((void)); # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) #endif diff --git a/sys/contrib/zstd/zlibWrapper/gzlib.c b/sys/contrib/zstd/zlibWrapper/gzlib.c --- a/sys/contrib/zstd/zlibWrapper/gzlib.c +++ b/sys/contrib/zstd/zlibWrapper/gzlib.c @@ -3,7 +3,7 @@ /* gzlib.c -- zlib functions common to reading and writing gzip files * Copyright (C) 2004-2017 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" @@ -19,8 +19,8 @@ #endif /* Local functions */ -local void gz_reset OF((gz_statep)); -local gzFile gz_open OF((const void *, int, const char *)); +local void gz_reset _Z_OF((gz_statep)); +local gzFile gz_open _Z_OF((const void *, int, const char *)); #if defined UNDER_CE diff --git a/sys/contrib/zstd/zlibWrapper/gzread.c b/sys/contrib/zstd/zlibWrapper/gzread.c --- a/sys/contrib/zstd/zlibWrapper/gzread.c +++ b/sys/contrib/zstd/zlibWrapper/gzread.c @@ -3,7 +3,7 @@ /* gzread.c -- zlib functions for reading gzip files * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include "gzguts.h" @@ -17,13 +17,13 @@ /* Local functions */ -local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); -local int gz_avail OF((gz_statep)); -local int gz_look OF((gz_statep)); -local int gz_decomp OF((gz_statep)); -local int gz_fetch OF((gz_statep)); -local int gz_skip OF((gz_statep, z_off64_t)); -local z_size_t gz_read OF((gz_statep, voidp, z_size_t)); +local int gz_load _Z_OF((gz_statep, unsigned char *, unsigned, unsigned *)); +local int gz_avail _Z_OF((gz_statep)); +local int gz_look _Z_OF((gz_statep)); +local int gz_decomp _Z_OF((gz_statep)); +local int gz_fetch _Z_OF((gz_statep)); +local int gz_skip _Z_OF((gz_statep, z_off64_t)); +local 
z_size_t gz_read _Z_OF((gz_statep, voidp, z_size_t)); /* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from state.state->fd, and update state.state->eof, state.state->err, and state.state->msg as appropriate. @@ -464,8 +464,8 @@ #endif #if ZLIB_VERNUM <= 0x1250 -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); -ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc _Z_OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc_ _Z_OF((gzFile file)); #endif int ZEXPORT gzgetc(file) diff --git a/sys/contrib/zstd/zlibWrapper/gzwrite.c b/sys/contrib/zstd/zlibWrapper/gzwrite.c --- a/sys/contrib/zstd/zlibWrapper/gzwrite.c +++ b/sys/contrib/zstd/zlibWrapper/gzwrite.c @@ -3,7 +3,7 @@ /* gzwrite.c -- zlib functions for writing gzip files * Copyright (C) 2004-2017 Mark Adler - * For conditions of distribution and use, see http://www.zlib.net/zlib_license.html + * For conditions of distribution and use, see https://www.zlib.net/zlib_license.html */ #include @@ -11,10 +11,10 @@ #include "gzguts.h" /* Local functions */ -local int gz_init OF((gz_statep)); -local int gz_comp OF((gz_statep, int)); -local int gz_zero OF((gz_statep, z_off64_t)); -local z_size_t gz_write OF((gz_statep, voidpc, z_size_t)); +local int gz_init _Z_OF((gz_statep)); +local int gz_comp _Z_OF((gz_statep, int)); +local int gz_zero _Z_OF((gz_statep, z_off64_t)); +local z_size_t gz_write _Z_OF((gz_statep, voidpc, z_size_t)); /* Initialize state for writing a gzip file. Mark initialization by setting state.state->size to non-zero. Return -1 on a memory allocation failure, or 0 on diff --git a/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.h b/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.h --- a/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.h +++ b/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the @@ -25,6 +25,9 @@ #define z_const #endif +#if !defined(_Z_OF) + #define _Z_OF OF +#endif /* returns a string with version of zstd library */ const char * zstdVersion(void); diff --git a/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.c b/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.c --- a/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.c +++ b/sys/contrib/zstd/zlibWrapper/zstd_zlibwrapper.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. 
 *
 * This source code is licensed under both the BSD-style license (found in the
@@ -101,7 +101,7 @@
 const char * zstdVersion(void) { return ZSTD_VERSION_STRING; }
 
-ZEXTERN const char * ZEXPORT z_zlibVersion OF((void)) { return zlibVersion(); }
+ZEXTERN const char * ZEXPORT z_zlibVersion _Z_OF((void)) { return zlibVersion(); }
 
 static void* ZWRAP_allocFunction(void* opaque, size_t size)
 {
@@ -260,7 +260,7 @@
     return (struct internal_state*) ptr;
 }
 
-ZEXTERN int ZEXPORT z_deflateInit_ OF((z_streamp strm, int level,
+ZEXTERN int ZEXPORT z_deflateInit_ _Z_OF((z_streamp strm, int level,
                                        const char *version, int stream_size))
 {
     ZWRAP_CCtx* zwc;
@@ -287,7 +287,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateInit2_ OF((z_streamp strm, int level, int method,
+ZEXTERN int ZEXPORT z_deflateInit2_ _Z_OF((z_streamp strm, int level, int method,
                                         int windowBits, int memLevel, int strategy,
                                         const char *version, int stream_size))
@@ -319,7 +319,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateReset OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_deflateReset _Z_OF((z_streamp strm))
 {
     LOG_WRAPPERC("- deflateReset\n");
     if (!g_ZWRAP_useZSTDcompression)
@@ -334,7 +334,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateSetDictionary OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflateSetDictionary _Z_OF((z_streamp strm,
                                                const Bytef *dictionary,
                                                uInt dictLength))
 {
@@ -359,7 +359,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflate OF((z_streamp strm, int flush))
+ZEXTERN int ZEXPORT z_deflate _Z_OF((z_streamp strm, int flush))
 {
     ZWRAP_CCtx* zwc;
@@ -465,7 +465,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateEnd OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_deflateEnd _Z_OF((z_streamp strm))
 {
     if (!g_ZWRAP_useZSTDcompression) {
         LOG_WRAPPERC("- deflateEnd\n");
@@ -483,7 +483,7 @@
 }
 
-ZEXTERN uLong ZEXPORT z_deflateBound OF((z_streamp strm,
+ZEXTERN uLong ZEXPORT z_deflateBound _Z_OF((z_streamp strm,
                                          uLong sourceLen))
 {
     if (!g_ZWRAP_useZSTDcompression)
@@ -493,7 +493,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateParams OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflateParams _Z_OF((z_streamp strm,
                                         int level,
                                         int strategy))
 {
@@ -594,7 +594,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateInit_ OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateInit_ _Z_OF((z_streamp strm,
                                        const char* version, int stream_size))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB) {
@@ -623,7 +623,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateInit2_ OF((z_streamp strm, int windowBits,
+ZEXTERN int ZEXPORT z_inflateInit2_ _Z_OF((z_streamp strm, int windowBits,
                                         const char *version, int stream_size))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB) {
@@ -660,7 +660,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateReset OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_inflateReset _Z_OF((z_streamp strm))
 {
     LOG_WRAPPERD("- inflateReset\n");
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
@@ -678,7 +678,7 @@
 
 #if ZLIB_VERNUM >= 0x1240
-ZEXTERN int ZEXPORT z_inflateReset2 OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateReset2 _Z_OF((z_streamp strm,
                                         int windowBits))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
@@ -696,7 +696,7 @@
 #endif
 
-ZEXTERN int ZEXPORT z_inflateSetDictionary OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateSetDictionary _Z_OF((z_streamp strm,
                                                const Bytef *dictionary,
                                                uInt dictLength))
 {
@@ -706,8 +706,10 @@
     {   ZWRAP_DCtx* const zwd = (ZWRAP_DCtx*) strm->state;
         if (zwd == NULL || zwd->zbd == NULL) return Z_STREAM_ERROR;
-        { size_t const initErr = ZSTD_initDStream_usingDict(zwd->zbd, dictionary, dictLength);
-          if (ZSTD_isError(initErr)) return ZWRAPD_finishWithError(zwd, strm, 0); }
+        { size_t const resetErr = ZSTD_DCtx_reset(zwd->zbd, ZSTD_reset_session_only);
+          if (ZSTD_isError(resetErr)) return ZWRAPD_finishWithError(zwd, strm, 0); }
+        { size_t const loadErr = ZSTD_DCtx_loadDictionary(zwd->zbd, dictionary, dictLength);
+          if (ZSTD_isError(loadErr)) return ZWRAPD_finishWithError(zwd, strm, 0); }
         zwd->decompState = ZWRAP_useReset;
 
         if (zwd->totalInBytes == ZSTD_HEADERSIZE) {
@@ -730,7 +732,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflate OF((z_streamp strm, int flush))
+ZEXTERN int ZEXPORT z_inflate _Z_OF((z_streamp strm, int flush))
 {
     ZWRAP_DCtx* zwd;
@@ -926,7 +928,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateEnd OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_inflateEnd _Z_OF((z_streamp strm))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
         return inflateEnd(strm);
@@ -943,7 +945,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateSync OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_inflateSync _Z_OF((z_streamp strm))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved) {
         return inflateSync(strm);
@@ -955,7 +957,7 @@
 
 /* Advanced compression functions */
-ZEXTERN int ZEXPORT z_deflateCopy OF((z_streamp dest,
+ZEXTERN int ZEXPORT z_deflateCopy _Z_OF((z_streamp dest,
                                       z_streamp source))
 {
     if (!g_ZWRAP_useZSTDcompression)
@@ -964,7 +966,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateTune OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflateTune _Z_OF((z_streamp strm,
                                       int good_length,
                                       int max_lazy,
                                       int nice_length,
@@ -977,7 +979,7 @@
 
 #if ZLIB_VERNUM >= 0x1260
-ZEXTERN int ZEXPORT z_deflatePending OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflatePending _Z_OF((z_streamp strm,
                                          unsigned *pending,
                                          int *bits))
 {
@@ -988,7 +990,7 @@
 #endif
 
-ZEXTERN int ZEXPORT z_deflatePrime OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflatePrime _Z_OF((z_streamp strm,
                                        int bits,
                                        int value))
 {
@@ -998,7 +1000,7 @@
 }
 
-ZEXTERN int ZEXPORT z_deflateSetHeader OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_deflateSetHeader _Z_OF((z_streamp strm,
                                            gz_headerp head))
 {
     if (!g_ZWRAP_useZSTDcompression)
@@ -1011,7 +1013,7 @@
 
 /* Advanced decompression functions */
 #if ZLIB_VERNUM >= 0x1280
-ZEXTERN int ZEXPORT z_inflateGetDictionary OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateGetDictionary _Z_OF((z_streamp strm,
                                                Bytef *dictionary,
                                                uInt *dictLength))
 {
@@ -1022,7 +1024,7 @@
 #endif
 
-ZEXTERN int ZEXPORT z_inflateCopy OF((z_streamp dest,
+ZEXTERN int ZEXPORT z_inflateCopy _Z_OF((z_streamp dest,
                                       z_streamp source))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !source->reserved)
@@ -1032,7 +1034,7 @@
 
 #if ZLIB_VERNUM >= 0x1240
-ZEXTERN long ZEXPORT z_inflateMark OF((z_streamp strm))
+ZEXTERN long ZEXPORT z_inflateMark _Z_OF((z_streamp strm))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
         return inflateMark(strm);
@@ -1041,7 +1043,7 @@
 #endif
 
-ZEXTERN int ZEXPORT z_inflatePrime OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflatePrime _Z_OF((z_streamp strm,
                                        int bits,
                                        int value))
 {
@@ -1051,7 +1053,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateGetHeader OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateGetHeader _Z_OF((z_streamp strm,
                                            gz_headerp head))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
@@ -1060,7 +1062,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateBackInit_ OF((z_streamp strm, int windowBits,
+ZEXTERN int ZEXPORT z_inflateBackInit_ _Z_OF((z_streamp strm, int windowBits,
                                            unsigned char FAR *window,
                                            const char *version,
                                            int stream_size))
@@ -1071,7 +1073,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateBack OF((z_streamp strm,
+ZEXTERN int ZEXPORT z_inflateBack _Z_OF((z_streamp strm,
                                       in_func in, void FAR *in_desc,
                                       out_func out, void FAR *out_desc))
 {
@@ -1081,7 +1083,7 @@
 }
 
-ZEXTERN int ZEXPORT z_inflateBackEnd OF((z_streamp strm))
+ZEXTERN int ZEXPORT z_inflateBackEnd _Z_OF((z_streamp strm))
 {
     if (g_ZWRAPdecompressionType == ZWRAP_FORCE_ZLIB || !strm->reserved)
         return inflateBackEnd(strm);
@@ -1089,14 +1091,14 @@
 }
 
-ZEXTERN uLong ZEXPORT z_zlibCompileFlags OF((void)) { return zlibCompileFlags(); }
+ZEXTERN uLong ZEXPORT z_zlibCompileFlags _Z_OF((void)) { return zlibCompileFlags(); }
 
 
 /* === utility functions === */
 #ifndef Z_SOLO
 
-ZEXTERN int ZEXPORT z_compress OF((Bytef *dest, uLongf *destLen,
+ZEXTERN int ZEXPORT z_compress _Z_OF((Bytef *dest, uLongf *destLen,
                                    const Bytef *source, uLong sourceLen))
 {
     if (!g_ZWRAP_useZSTDcompression)
@@ -1115,7 +1117,7 @@
 }
 
-ZEXTERN int ZEXPORT z_compress2 OF((Bytef *dest, uLongf *destLen,
+ZEXTERN int ZEXPORT z_compress2 _Z_OF((Bytef *dest, uLongf *destLen,
                                     const Bytef *source, uLong sourceLen,
                                     int level))
 {
@@ -1131,7 +1133,7 @@
 }
 
-ZEXTERN uLong ZEXPORT z_compressBound OF((uLong sourceLen))
+ZEXTERN uLong ZEXPORT z_compressBound _Z_OF((uLong sourceLen))
 {
     if (!g_ZWRAP_useZSTDcompression)
         return compressBound(sourceLen);
@@ -1140,7 +1142,7 @@
 }
 
-ZEXTERN int ZEXPORT z_uncompress OF((Bytef *dest, uLongf *destLen,
+ZEXTERN int ZEXPORT z_uncompress _Z_OF((Bytef *dest, uLongf *destLen,
                                      const Bytef *source, uLong sourceLen))
 {
     if (!ZSTD_isFrame(source, sourceLen))
@@ -1159,24 +1161,24 @@
 
 /* checksum functions */
 
-ZEXTERN uLong ZEXPORT z_adler32 OF((uLong adler, const Bytef *buf, uInt len))
+ZEXTERN uLong ZEXPORT z_adler32 _Z_OF((uLong adler, const Bytef *buf, uInt len))
 {
     return adler32(adler, buf, len);
 }
 
-ZEXTERN uLong ZEXPORT z_crc32 OF((uLong crc, const Bytef *buf, uInt len))
+ZEXTERN uLong ZEXPORT z_crc32 _Z_OF((uLong crc, const Bytef *buf, uInt len))
 {
     return crc32(crc, buf, len);
 }
 
 #if ZLIB_VERNUM >= 0x12B0
-ZEXTERN uLong ZEXPORT z_adler32_z OF((uLong adler, const Bytef *buf, z_size_t len))
+ZEXTERN uLong ZEXPORT z_adler32_z _Z_OF((uLong adler, const Bytef *buf, z_size_t len))
 {
     return adler32_z(adler, buf, len);
 }
 
-ZEXTERN uLong ZEXPORT z_crc32_z OF((uLong crc, const Bytef *buf, z_size_t len))
+ZEXTERN uLong ZEXPORT z_crc32_z _Z_OF((uLong crc, const Bytef *buf, z_size_t len))
 {
     return crc32_z(crc, buf, len);
 }
@@ -1184,14 +1186,14 @@
 
 #if ZLIB_VERNUM >= 0x1270
-ZEXTERN const z_crc_t FAR * ZEXPORT z_get_crc_table OF((void))
+ZEXTERN const z_crc_t FAR * ZEXPORT z_get_crc_table _Z_OF((void))
 {
     return get_crc_table();
 }
 #endif
 
 /* Error function */
-ZEXTERN const char * ZEXPORT z_zError OF((int err))
+ZEXTERN const char * ZEXPORT z_zError _Z_OF((int err))
 {
     /* Just use zlib Error function */
     return zError(err);
diff --git a/sys/fs/tarfs/tarfs_io.c b/sys/fs/tarfs/tarfs_io.c
--- a/sys/fs/tarfs/tarfs_io.c
+++ b/sys/fs/tarfs/tarfs_io.c
@@ -355,7 +355,7 @@
 	if (reset) {
 		zio->ipos = zio->idx[zio->curidx].i;
 		zio->opos = zio->idx[zio->curidx].o;
-		ZSTD_resetDStream(zstd->zds);
+		ZSTD_DCtx_reset(zstd->zds, ZSTD_reset_session_only);
 		TARFS_DPF(ZIDX, "%s: skipping to index %u = i %zu o %zu\n",
 		    __func__, zio->curidx, (size_t)zio->ipos, (size_t)zio->opos);
 	} else {
@@ -511,7 +511,7 @@
 		zio->curidx = 0;
 		zio->ipos = zio->idx[0].i;
 		zio->opos = zio->idx[0].o;
-		ZSTD_resetDStream(zstd->zds);
+		ZSTD_DCtx_reset(zstd->zds, ZSTD_reset_session_only);
 	}
 	return (error);
 }
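For context: both files above move off the deprecated streaming calls (`ZSTD_resetDStream()`, `ZSTD_initDStream_usingDict()`) onto the explicit-reset API that has been stable since zstd v1.4.0. A minimal standalone sketch of that replacement pattern follows; it is illustrative only, and the helper name `rearm_dstream()` is hypothetical, not code from this commit.

/*
 * Sketch only: re-arm a zstd decompression stream for the next frame
 * using ZSTD_DCtx_reset() plus an explicit dictionary load, mirroring
 * the call sites changed above.  Not part of this commit.
 */
#include <stddef.h>
#include <zstd.h>

static int
rearm_dstream(ZSTD_DStream *zds, const void *dict, size_t dictlen)
{
	size_t zerr;

	/* Drop any in-flight frame; compression parameters are kept. */
	zerr = ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
	if (ZSTD_isError(zerr))
		return (-1);
	/* Dictionary loading is now a separate, explicit step. */
	if (dict != NULL) {
		zerr = ZSTD_DCtx_loadDictionary(zds, dict, dictlen);
		if (ZSTD_isError(zerr))
			return (-1);
	}
	return (0);
}

Note that ZSTD_DStream is a typedef of ZSTD_DCtx in current zstd, which is why the ZSTD_DCtx_* calls can be applied directly to `zwd->zbd` and `zstd->zds` in the hunks above.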