Index: vendor/zstd/dist/LICENSE-examples =================================================================== --- vendor/zstd/dist/LICENSE-examples (revision 325596) +++ vendor/zstd/dist/LICENSE-examples (nonexistent) @@ -1,11 +0,0 @@ -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. - -The examples provided by Facebook are for non-commercial testing and evaluation -purposes only. Facebook reserves all rights not expressly granted. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -FACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Index: vendor/zstd/dist/COPYING =================================================================== --- vendor/zstd/dist/COPYING (nonexistent) +++ vendor/zstd/dist/COPYING (revision 325597) @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. 
+ + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details.
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. \ No newline at end of file Index: vendor/zstd/dist/Makefile =================================================================== --- vendor/zstd/dist/Makefile (revision 325596) +++ vendor/zstd/dist/Makefile (revision 325597) @@ -1,334 +1,348 @@ # ################################################################ -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. # All rights reserved. # -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree).
# ################################################################ PRGDIR = programs ZSTDDIR = lib BUILDIR = build ZWRAPDIR = zlibWrapper TESTDIR = tests +FUZZDIR = $(TESTDIR)/fuzz # Define nul output VOID = /dev/null ifneq (,$(filter Windows%,$(OS))) EXT =.exe else EXT = endif .PHONY: default default: lib-release zstd-release .PHONY: all all: | allmost examples manual .PHONY: allmost -allmost: - $(MAKE) -C $(ZSTDDIR) all - $(MAKE) -C $(PRGDIR) all - $(MAKE) -C $(TESTDIR) all +allmost: allzstd $(MAKE) -C $(ZWRAPDIR) all #skip zwrapper, can't build that on alternate architectures without the proper zlib installed -.PHONY: allarch -allarch: +.PHONY: allzstd +allzstd: $(MAKE) -C $(ZSTDDIR) all $(MAKE) -C $(PRGDIR) all $(MAKE) -C $(TESTDIR) all .PHONY: all32 all32: $(MAKE) -C $(PRGDIR) zstd32 $(MAKE) -C $(TESTDIR) all32 .PHONY: lib lib: @$(MAKE) -C $(ZSTDDIR) $@ .PHONY: lib-release lib-release: @$(MAKE) -C $(ZSTDDIR) .PHONY: zstd zstd: @$(MAKE) -C $(PRGDIR) $@ cp $(PRGDIR)/zstd$(EXT) . .PHONY: zstd-release zstd-release: @$(MAKE) -C $(PRGDIR) cp $(PRGDIR)/zstd$(EXT) . .PHONY: zstdmt zstdmt: @$(MAKE) -C $(PRGDIR) $@ cp $(PRGDIR)/zstd$(EXT) ./zstdmt$(EXT) .PHONY: zlibwrapper zlibwrapper: $(MAKE) -C $(ZWRAPDIR) test .PHONY: test shortest test shortest: $(MAKE) -C $(PRGDIR) allVariants $(MAKE) -C $(TESTDIR) $@ .PHONY: examples examples: CPPFLAGS=-I../lib LDFLAGS=-L../lib $(MAKE) -C examples/ all .PHONY: manual manual: $(MAKE) -C contrib/gen_html $@ .PHONY: cleanTabs cleanTabs: cd contrib; ./cleanTabs .PHONY: clean clean: @$(MAKE) -C $(ZSTDDIR) $@ > $(VOID) @$(MAKE) -C $(PRGDIR) $@ > $(VOID) @$(MAKE) -C $(TESTDIR) $@ > $(VOID) @$(MAKE) -C $(ZWRAPDIR) $@ > $(VOID) @$(MAKE) -C examples/ $@ > $(VOID) @$(MAKE) -C contrib/gen_html $@ > $(VOID) @$(RM) zstd$(EXT) zstdmt$(EXT) tmp* + @$(RM) -r lz4 @echo Cleaning completed #------------------------------------------------------------------------------ # make install is validated only for Linux, OSX, Hurd and some BSD targets #------------------------------------------------------------------------------ ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD MSYS_NT)) HOST_OS = POSIX CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON .PHONY: list list: @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs -.PHONY: install clangtest gpptest armtest usan asan uasan +.PHONY: install clangtest armtest usan asan uasan install: @$(MAKE) -C $(ZSTDDIR) $@ @$(MAKE) -C $(PRGDIR) $@ .PHONY: uninstall uninstall: @$(MAKE) -C $(ZSTDDIR) $@ @$(MAKE) -C $(PRGDIR) $@ .PHONY: travis-install travis-install: $(MAKE) install PREFIX=~/install_test_dir .PHONY: gppbuild gppbuild: clean g++ -v CC=g++ $(MAKE) -C programs all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror" .PHONY: gcc5build gcc5build: clean gcc-5 -v CC=gcc-5 $(MAKE) all MOREFLAGS="-Werror" .PHONY: gcc6build gcc6build: clean gcc-6 -v CC=gcc-6 $(MAKE) all MOREFLAGS="-Werror" .PHONY: gcc7build gcc7build: clean gcc-7 -v CC=gcc-7 $(MAKE) all MOREFLAGS="-Werror" .PHONY: clangbuild clangbuild: clean clang -v CXX=clang++ CC=clang $(MAKE) all MOREFLAGS="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation" m32build: clean gcc -v $(MAKE) all32 armbuild: clean - CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allarch + 
CC=arm-linux-gnueabi-gcc CFLAGS="-Werror" $(MAKE) allzstd aarch64build: clean - CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allarch + CC=aarch64-linux-gnu-gcc CFLAGS="-Werror" $(MAKE) allzstd ppcbuild: clean - CC=powerpc-linux-gnu-gcc CLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allarch + CC=powerpc-linux-gnu-gcc CFLAGS="-m32 -Wno-attributes -Werror" $(MAKE) allzstd ppc64build: clean - CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allarch + CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allzstd armfuzz: clean CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest aarch64fuzz: clean CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest ppcfuzz: clean CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest ppc64fuzz: clean CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest -gpptest: clean - CC=$(CXX) $(MAKE) -C $(PRGDIR) all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror" +.PHONY: cxxtest +cxxtest: CXXFLAGS += -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror +cxxtest: clean + $(MAKE) -C $(PRGDIR) all CC="$(CXX) -Wno-deprecated" CFLAGS="$(CXXFLAGS)" # adding -Wno-deprecated to avoid clang++ warning on dealing with C files directly gcc5test: clean gcc-5 -v $(MAKE) all CC=gcc-5 MOREFLAGS="-Werror" gcc6test: clean gcc-6 -v $(MAKE) all CC=gcc-6 MOREFLAGS="-Werror" clangtest: clean clang -v $(MAKE) all CXX=clang++ CC=clang MOREFLAGS="-Werror -Wconversion -Wno-sign-conversion -Wdocumentation" armtest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests aarch64test: $(MAKE) -C $(TESTDIR) datagen # use native, faster $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests ppctest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests ppc64test: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests arm-ppc-compilation: $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" $(MAKE) -C $(PRGDIR) clean zstd CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" +regressiontest: + $(MAKE) -C $(FUZZDIR) regressiontest + +uasanregressiontest: + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=address,undefined" CXXFLAGS="-O3 -fsanitize=address,undefined" + +msanregressiontest: + $(MAKE) -C $(FUZZDIR) regressiontest CC=clang CXX=clang++ CFLAGS="-O3 -fsanitize=memory" CXXFLAGS="-O3 -fsanitize=memory" + # run UBsan with
-fsanitize-recover=signed-integer-overflow # due to a bug in UBsan when doing pointer subtraction # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303 usan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=undefined" asan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address" asan-%: clean LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address" $(MAKE) -C $(TESTDIR) $* msan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer" HAVE_LZMA=0 # datagen.c fails this test for no obvious reason msan-%: clean LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) HAVE_LZMA=0 $* asan32: clean $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address" uasan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" uasan-%: clean LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" $(MAKE) -C $(TESTDIR) $* tsan-%: clean LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS=--no-big-tests apt-install: sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES) apt-add-repo: sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test sudo apt-get update -y -qq ppcinstall: APT_PACKAGES="qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu" $(MAKE) apt-install arminstall: APT_PACKAGES="qemu-system-arm qemu-user-static gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross" $(MAKE) apt-install valgrindinstall: APT_PACKAGES="valgrind" $(MAKE) apt-install libc6install: APT_PACKAGES="libc6-dev-i386 gcc-multilib" $(MAKE) apt-install gcc6install: apt-add-repo APT_PACKAGES="libc6-dev-i386 gcc-multilib gcc-6 gcc-6-multilib" $(MAKE) apt-install gpp6install: apt-add-repo APT_PACKAGES="libc6-dev-i386 g++-multilib gcc-6 g++-6 g++-6-multilib" $(MAKE) apt-install clang38install: APT_PACKAGES="clang-3.8" $(MAKE) apt-install +# Ubuntu 14.04 ships a too-old lz4 +lz4install: + [ -e lz4 ] || git clone https://github.com/lz4/lz4 && sudo $(MAKE) -C lz4 install + endif ifneq (,$(filter MSYS%,$(shell uname))) HOST_OS = MSYS CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON endif #------------------------------------------------------------------------ -#make tests validated only for MSYS, Linux, OSX, kFreeBSD and Hurd targets +# target specific tests #------------------------------------------------------------------------ ifneq (,$(filter $(HOST_OS),MSYS POSIX)) cmakebuild: cmake --version $(RM) -r $(BUILDIR)/cmake/build mkdir $(BUILDIR)/cmake/build cd $(BUILDIR)/cmake/build ; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. 
; $(MAKE) install ; $(MAKE) uninstall c90build: clean - gcc -v + $(CC) -v CFLAGS="-std=c90" $(MAKE) allmost # will fail, due to missing support for `long long` gnu90build: clean - gcc -v + $(CC) -v CFLAGS="-std=gnu90" $(MAKE) allmost c99build: clean - gcc -v + $(CC) -v CFLAGS="-std=c99" $(MAKE) allmost gnu99build: clean - gcc -v + $(CC) -v CFLAGS="-std=gnu99" $(MAKE) allmost c11build: clean - gcc -v + $(CC) -v CFLAGS="-std=c11" $(MAKE) allmost bmix64build: clean - gcc -v + $(CC) -v CFLAGS="-O3 -mbmi -Werror" $(MAKE) -C $(TESTDIR) test bmix32build: clean - gcc -v + $(CC) -v CFLAGS="-O3 -mbmi -mx32 -Werror" $(MAKE) -C $(TESTDIR) test bmi32build: clean - gcc -v + $(CC) -v CFLAGS="-O3 -mbmi -m32 -Werror" $(MAKE) -C $(TESTDIR) test staticAnalyze: clean - gcc -v + $(CC) -v CPPFLAGS=-g scan-build --status-bugs -v $(MAKE) all endif Index: vendor/zstd/dist/NEWS =================================================================== --- vendor/zstd/dist/NEWS (revision 325596) +++ vendor/zstd/dist/NEWS (revision 325597) @@ -1,313 +1,336 @@ +v1.3.2 +new : long range mode, using --long command, by Stella Lau (@stellamplau) +new : ability to generate and decode magicless frames (#591) +changed : maximum nb of threads reduced to 200, to avoid address space exhaustion in 32-bits mode +fix : multi-threading compression works with custom allocators +fix : ZSTD_sizeof_CStream() was over-evaluating memory usage +fix : a rare compression bug when compression generates very large distances and a bunch of other conditions (only possible at --ultra -22) +fix : 32-bits build can now decode large offsets (levels 21+) +cli : added LZ4 frame support by default, by Felix Handte (@felixhandte) +cli : improved --list output +cli : new : can split input file for dictionary training, using command -B# +cli : new : clean operation artefact on Ctrl-C interruption +cli : fix : do not change /dev/null permissions when using command -t with root access, reported by @mike155 (#851) +cli : fix : write file size in header in multiple-files mode +api : added macro ZSTD_COMPRESSBOUND() for static allocation +api : experimental : new advanced decompression API +api : fix : sizeof_CCtx() used to over-estimate +build: fix : no-multithread variant compiles without pool.c dependency, reported by Mitchell Blank Jr (@mitchblank) (#819) +build: better compatibility with reproducible builds, by Bernhard M. Wiedemann (@bmwiedemann) (#818) +example : added streaming_memory_usage +license : changed /examples license to BSD + GPLv2 +license : fix a few header files to reflect new license (#825) + v1.3.1 New license : BSD + GPLv2 perf: substantially decreased memory usage in Multi-threading mode, thanks to reports by Tino Reichardt (@mcmilk) perf: Multi-threading supports up to 256 threads.
Cap at 256 when more are requested (#760) cli : improved and fixed --list command, by @ib (#772) cli : command -vV to list supported formats, by @ib (#771) build : fixed binary variants, reported by @svenha (#788) build : fix Visual compilation for non x86/x64 targets, reported by Greg Slazinski (@GregSlazinski) (#718) API exp : breaking change : ZSTD_getFrameHeader() provides more information API exp : breaking change : pinned down values of error codes doc : fixed Huffman example, by Ulrich Kunitz (@ulikunitz) new : contrib/adaptive-compression, I/O driven compression strength, by Paul Cruz (@paulcruz74) new : contrib/long_distance_matching, statistics by Stella Lau (@stellamplau) updated : contrib/linux-kernel, by Nick Terrell (@terrelln) v1.3.0 cli : new : `--list` command, by Paul Cruz cli : changed : xz/lzma support enabled by default cli : changed : `-t *` continue processing list after a decompression error API : added : ZSTD_versionString() API : promoted to stable status : ZSTD_getFrameContentSize(), by Sean Purcell API exp : new advanced API : ZSTD_compress_generic(), ZSTD_CCtx_setParameter() API exp : new : API for static or external allocation : ZSTD_initStatic?Ctx() API exp : added : ZSTD_decompressBegin_usingDDict(), requested by Guy Riddle (#700) API exp : clarified memory estimation / measurement functions. API exp : changed : strongest strategy renamed ZSTD_btultra, fastest strategy ZSTD_fast set to 1 tools : decodecorpus can generate random dictionary-compressed samples, by Paul Cruz new : contrib/seekable_format, demo and API, by Sean Purcell changed : contrib/linux-kernel, updated version and license, by Nick Terrell v1.2.0 cli : changed : Multithreading enabled by default (use target zstd-nomt or HAVE_THREAD=0 to disable) cli : new : command -T0 means "detect and use nb of cores", by Sean Purcell cli : new : zstdmt symlink hardwired to `zstd -T0` cli : new : command --threads=# (#671) cli : changed : cover dictionary builder by default, for improved quality, by Nick Terrell cli : new : commands --train-cover and --train-legacy, to select dictionary algorithm and parameters cli : experimental targets `zstd4` and `xzstd4`, with support for lz4 format, by Sean Purcell cli : fix : does not output compressed data on console cli : fix : ignore symbolic links unless --force specified API : breaking change : ZSTD_createCDict_advanced(), only use compressionParameters as argument API : added : prototypes ZSTD_*_usingCDict_advanced(), for direct control over frameParameters. API : improved: ZSTDMT_compressCCtx() reduced memory usage API : fix : ZSTDMT_compressCCtx() now provides srcSize in header (#634) API : fix : src size stored in frame header is controlled at end of frame API : fix : enforced consistent rules for pledgedSrcSize==0 (#641) API : fix : error code "GENERIC" replaced by "dstSizeTooSmall" when appropriate build: improved cmake script, by @Majlen build: enabled Multi-threading support for *BSD, by Baptiste Daroussin tools: updated Paramgrill. Command -O# provides best parameters for sample and speed target.
new : contrib/linux-kernel version, by Nick Terrell v1.1.4 cli : new : can compress in *.gz format, using --format=gzip command, by Przemyslaw Skibinski cli : new : advanced benchmark command --priority=rt cli : fix : write on sparse-enabled file systems in 32-bits mode, by @ds77 cli : fix : --rm remains silent when input is stdin cli : experimental : xzstd, with support for xz/lzma decoding, by Przemyslaw Skibinski speed : improved decompression speed in streaming mode for single shot scenarios (+5%) memory: DDict (decompression dictionary) memory usage down from 150 KB to 20 KB arch: 32-bits variant able to generate and decode very long matches (>32 MB), by Sean Purcell API : new : ZSTD_findFrameCompressedSize(), ZSTD_getFrameContentSize(), ZSTD_findDecompressedSize() API : changed : dropped support of legacy versions <= v0.3 (can be changed by modifying ZSTD_LEGACY_SUPPORT value) build : new: meson build system in contrib/meson, by Dima Krasner build : improved cmake script, by @Majlen build : added -Wformat-security flag, as recommended by Padraig Brady doc : new : educational decoder, by Sean Purcell v1.1.3 cli : zstd can decompress .gz files (can be disabled with `make zstd-nogz` or `make HAVE_ZLIB=0`) cli : new : experimental target `make zstdmt`, with multi-threading support cli : new : improved dictionary builder "cover" (experimental), by Nick Terrell, based on prior work by Giuseppe Ottaviano. cli : new : advanced commands for detailed parameters, by Przemyslaw Skibinski cli : fix zstdless on Mac OS-X, by Andrew Janke cli : fix #232 "compress non-files" dictBuilder : improved dictionary generation quality, thanks to Nick Terrell API : new : lib/compress/ZSTDMT_compress.h multithreading API (experimental) API : new : ZSTD_create?Dict_byReference(), requested by Bartosz Taudul API : new : ZDICT_finalizeDictionary() API : fix : ZSTD_initCStream_usingCDict() properly writes dictID into frame header, by Gregory Szorc (#511) API : fix : all symbols properly exposed in libzstd, by Nick Terrell build : support for Solaris target, by Przemyslaw Skibinski doc : clarified specification, by Sean Purcell v1.1.2 API : streaming : decompression : changed : automatic implicit reset when chain-decoding new frames without init API : experimental : added : dictID retrieval functions, and ZSTD_initCStream_srcSize() API : zbuff : changed : prototypes now generate deprecation warnings lib : improved : faster decompression speed at ultra compression settings and 32-bits mode lib : changed : only public ZSTD_ symbols are now exposed lib : changed : reduced usage of stack memory lib : fixed : several corner case bugs, by Nick Terrell cli : new : gzstd, experimental version able to decode .gz files, by Przemyslaw Skibinski cli : new : preserve file attributes cli : new : added zstdless and zstdgrep tools cli : fixed : status displays total amount decoded, even for file consisting of multiple frames (like pzstd) cli : fixed : zstdcat zlib_wrapper : added support for gz* functions, by Przemyslaw Skibinski install : better compatibility with FreeBSD, by Dimitry Andric source tree : changed : zbuff source files moved to lib/deprecated v1.1.1 New : command -M#, --memory=, --memlimit=, --memlimit-decompress= to limit allowed memory consumption New : doc/zstd_manual.html, by Przemyslaw Skibinski Improved : slightly better compression ratio at --ultra levels (>= 20) Improved : better memory usage when using streaming compression API, thanks to @Rogier-5 report Added : API : ZSTD_initCStream_usingCDict(), 
ZSTD_initDStream_usingDDict() (experimental section) Added : example/multiple_streaming_compression.c Changed : zstd_errors.h is now installed within /include (and replaces errors_public.h) Updated man page Fixed : zstd-small, zstd-compress and zstd-decompress compilation targets v1.1.0 New : contrib/pzstd, parallel version of zstd, by Nick Terrell added : NetBSD install target (#338) Improved : speed for batches of small files Improved : speed of zlib wrapper, by Przemyslaw Skibinski Changed : libzstd on Windows supports legacy formats, by Christophe Chevalier Fixed : CLI -d output to stdout by default when input is stdin (#322) Fixed : CLI correctly detects console on Mac OS-X Fixed : CLI supports recursive mode `-r` on Mac OS-X Fixed : Legacy decoders use unified error codes, reported by benrg (#341), fixed by Przemyslaw Skibinski Fixed : compatibility with OpenBSD, reported by Juan Francisco Cantero Hurtado (#319) Fixed : compatibility with Hurd, by Przemyslaw Skibinski (#365) Fixed : zstd-pgo, reported by octoploid (#329) v1.0.0 Change Licensing, the whole project is now BSD, Copyright Facebook Small decompression speed improvement API : Streaming API supports legacy format API : ZDICT_getDictID(), ZSTD_sizeof_{CCtx, DCtx, CStream, DStream}(), ZSTD_setDStreamParameter() CLI supports legacy formats v0.4+ Fixed : compression fails on certain huge files, reported by Jesse McGrew Enhanced documentation, by Przemyslaw Skibinski v0.8.1 New streaming API Changed : --ultra now enables levels beyond 19 Changed : -i# now selects benchmark time in seconds Fixed : ZSTD_compress* can now compress > 4 GB in a single pass, reported by Nick Terrell Fixed : speed regression on specific patterns (#272) Fixed : support for Z_SYNC_FLUSH, by Dmitry Krot (#291) Fixed : ICC compilation, by Przemyslaw Skibinski v0.8.0 Improved : better speed on clang and gcc -O2, thanks to Eric Biggers New : Build on FreeBSD and DragonFly, thanks to JrMarino Changed : modified API : ZSTD_compressEnd() Fixed : legacy mode with ZSTD_HEAPMODE=0, by Christopher Bergqvist Fixed : premature end of frame when zero-sized raw block, reported by Eric Biggers Fixed : large dictionaries (> 384 KB), reported by Ilona Papava Fixed : checksum correctly checked in single-pass mode Fixed : combined --test and --rm, reported by Andreas M. Nilsson Modified : minor compression level adaptations Updated : compression format specification to v0.2.0 changed : zstd.h moved to /lib directory v0.7.5 Transition version, supporting decoding of v0.8.x v0.7.4 Added : homebrew for Mac, by Daniel Cade Added : more examples Fixed : segfault when using small dictionaries, reported by Felix Handte Modified : default compression level for CLI is now 3 Updated : specification, to v0.1.1 v0.7.3 New : compression format specification New : `--` separator, stating that all following arguments are file names. Suggested by Chip Turner. New : `ZSTD_getDecompressedSize()` New : OpenBSD target, by Juan Francisco Cantero Hurtado New : `examples` directory fixed : dictBuilder using HC levels, reported by Bartosz Taudul fixed : legacy support from ZSTD_decompress_usingDDict(), reported by Felix Handte fixed : multi-blocks decoding with intermediate uncompressed blocks, reported by Greg Slazinski modified : removed "mem.h" and "error_public.h" dependencies from "zstd.h" (experimental section) modified : legacy functions no longer need magic number v0.7.2 fixed : ZSTD_decompressBlock() using multiple consecutive blocks. Reported by Greg Slazinski.
fixed : potential segfault on very large files (many gigabytes). Reported by Chip Turner. fixed : CLI displays system error message when destination file cannot be created (#231). Reported by Chip Turner. v0.7.1 fixed : ZBUFF_compressEnd() called multiple times with too small `dst` buffer, reported by Christophe Chevalier fixed : dictBuilder fails if first sample is too small, reported by Руслан Ковалёв fixed : corruption issue, reported by cj modified : checksum enabled by default in command line mode v0.7.0 New : Support for directory compression, using `-r`, thanks to Przemyslaw Skibinski New : Command `--rm`, to remove source file after successful de/compression New : Visual build scripts, by Christophe Chevalier New : Support for Sparse File-systems (do not use space for zero-filled sectors) New : Frame checksum support New : Support pass-through mode (when using `-df`) API : more efficient Dictionary API : `ZSTD_compress_usingCDict()`, `ZSTD_decompress_usingDDict()` API : create dictionary files from custom content, by Giuseppe Ottaviano API : support for custom malloc/free functions New : controllable Dictionary ID New : Support for skippable frames v0.6.1 New : zlib wrapper API, thanks to Przemyslaw Skibinski New : Ability to compile compressor / decompressor separately Changed : new lib directory structure Fixed : Legacy codec v0.5 compatible with dictionary decompression Fixed : Decoder corruption error (#173) Fixed : null-string roundtrip (#176) New : benchmark mode can select directory as input Experimental : midipix support, VMS support v0.6.0 Stronger high compression modes, thanks to Przemyslaw Skibinski API : ZSTD_getFrameParams() provides size of decompressed content New : highest compression modes require `--ultra` command to fully unleash their capacity Fixed : zstd cli returns error code > 0 and removes dst file artifact when decompression fails, thanks to Chip Turner v0.5.1 New : Optimal parsing => Very high compression modes, thanks to Przemyslaw Skibinski Changed : Dictionary builder integrated into libzstd and zstd cli Changed (!) : zstd cli now uses "multiple input files" as default mode. See `zstd -h`.
Fix : high compression modes for big-endian platforms New : zstd cli : `-t` | `--test` command v0.5.0 New : dictionary builder utility Changed : streaming & dictionary API Improved : better compression of small data v0.4.7 Improved : small compression speed improvement in HC mode Changed : `zstd_decompress.c` sets ZSTD_LEGACY_SUPPORT to 0 by default fix : bt search bug v0.4.6 fix : fast compression mode on Windows New : cmake configuration file, thanks to Artyom Dymchenko Improved : high compression mode on repetitive data New : block-level API New : ZSTD_duplicateCCtx() v0.4.5 new : -m/--multiple : compress/decompress multiple files v0.4.4 Fixed : high compression modes for Windows 32 bits new : external dictionary API extended to buffered mode and accessible through command line new : windows DLL project, thanks to Christophe Chevalier v0.4.3 : new : external dictionary API new : zstd-frugal v0.4.2 : Generic minor improvements for small blocks Fixed : big-endian compatibility, by Peter Harris (#85) v0.4.1 Fixed : ZSTD_LEGACY_SUPPORT=0 build mode (reported by Luben) removed `zstd.c` v0.4.0 Command line utility compatible with high compression levels Removed zstdhc => merged into zstd Added : ZBUFF API (see zstd_buffered.h) Rolling buffer support v0.3.6 small blocks params v0.3.5 minor generic compression improvements v0.3.4 Faster fast cLevels v0.3.3 Small compression ratio improvement v0.3.2 Fixed Visual Studio v0.3.1 : Small compression ratio improvement v0.3 HC mode : compression levels 2-26 v0.2.2 Fix : Visual Studio 2013 & 2015 release compilation, by Christophe Chevalier v0.2.1 Fix : Read errors, advanced fuzzer tests, by Hanno Böck v0.2.0 **Breaking format change** Faster decompression speed Can still decode v0.1 format v0.1.3 fix uninitialized-variable warning, reported by Evan Nemerson v0.1.2 frame concatenation support v0.1.1 fix compression bug detects write-flush errors v0.1.0 first release Index: vendor/zstd/dist/appveyor.yml =================================================================== --- vendor/zstd/dist/appveyor.yml (revision 325596) +++ vendor/zstd/dist/appveyor.yml (revision 325597) @@ -1,240 +1,240 @@ - version: 1.0.{build} branches: only: - dev - master environment: matrix: - COMPILER: "gcc" HOST: "mingw" PLATFORM: "x64" - SCRIPT: "make allarch && make -C tests test-symbols fullbench-dll fullbench-lib" + SCRIPT: "make allzstd MOREFLAGS=-static && make -C tests test-symbols fullbench-dll fullbench-lib" ARTIFACT: "true" BUILD: "true" - COMPILER: "gcc" HOST: "mingw" PLATFORM: "x86" - SCRIPT: "make allarch" + SCRIPT: "make allzstd MOREFLAGS=-static" ARTIFACT: "true" BUILD: "true" - COMPILER: "clang" HOST: "mingw" PLATFORM: "x64" - SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch" + SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allzstd" BUILD: "true" - COMPILER: "gcc" HOST: "mingw" PLATFORM: "x64" SCRIPT: "" TEST: "cmake" - COMPILER: "visual" HOST: "visual" PLATFORM: "x64" CONFIGURATION: "Debug" - COMPILER: "visual" HOST: "visual" PLATFORM: "Win32" CONFIGURATION: "Debug" - COMPILER: "visual" HOST: "visual" PLATFORM: "x64" CONFIGURATION: "Release" - COMPILER: "visual" HOST: "visual" PLATFORM: "Win32" CONFIGURATION: "Release" install: - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - SET PATH_ORIGINAL=%PATH% - if [%HOST%]==[mingw] ( SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && SET
"PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe ) - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" ) build_script: - if [%HOST%]==[mingw] ( ( if [%PLATFORM%]==[x64] ( SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" ) else if [%PLATFORM%]==[x86] ( SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" ) ) ) - if [%HOST%]==[mingw] if [%BUILD%]==[true] ( make -v && sh -c "%COMPILER% -v" && ECHO Building zlib to static link && SET "CC=%COMPILER%" && sh -c "cd .. && git clone --depth 1 --branch v1.2.11 https://github.com/madler/zlib" && sh -c "cd ../zlib && make -f win32/Makefile.gcc libz.a" ECHO Building zstd && SET "CPPFLAGS=-I../../zlib" && SET "LDFLAGS=../../zlib/libz.a" && sh -c "%SCRIPT%" && ( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true] lib\dll\example\build_package.bat && make -C programs DEBUGFLAGS= clean zstd && cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe && appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip && cp zstd.exe ..\bin\zstd.exe && cd ..\bin\ && 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * && appveyor PushArtifact zstd-win-release-%PLATFORM%.zip ) ) - if [%HOST%]==[visual] ( ECHO *** && ECHO *** Building Visual Studio 2008 %PLATFORM%\%CONFIGURATION% in %APPVEYOR_BUILD_FOLDER% && ECHO *** && msbuild "build\VS2008\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v90 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\*.exe && MD5sum build/VS2008/bin/%PLATFORM%/%CONFIGURATION%/*.exe && COPY build\VS2008\bin\%PLATFORM%\%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2008_%PLATFORM%_%CONFIGURATION%.exe && ECHO *** && ECHO *** Building Visual Studio 2010 %PLATFORM%\%CONFIGURATION% && ECHO *** && msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && msbuild "build\VS2010\zstd.sln" %ADDITIONALPARAM% /m /verbosity:minimal /property:PlatformToolset=v100 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2010_%PLATFORM%_%CONFIGURATION%.exe && ECHO *** && ECHO *** Building Visual Studio 2012 %PLATFORM%\%CONFIGURATION% && ECHO *** && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% 
/p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v110 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2012_%PLATFORM%_%CONFIGURATION%.exe && ECHO *** && ECHO *** Building Visual Studio 2013 %PLATFORM%\%CONFIGURATION% && ECHO *** && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v120 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2013_%PLATFORM%_%CONFIGURATION%.exe && ECHO *** && ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && ECHO *** && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ ) test_script: - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% - SET "CC=gcc" - SET "CXX=g++" - if [%TEST%]==[cmake] ( mkdir build\cmake\build && cd build\cmake\build && cmake -G "Visual Studio 14 2015 Win64" .. && cd ..\..\.. 
&& make clean ) - SET "FUZZERTEST=-T30s" - if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] ( CD tests && SET ZSTD=./zstd.exe && sh -e playTests.sh --test-large-data && fullbench.exe -i1 && fullbench.exe -i1 -P0 && fuzzer_VS2008_%PLATFORM%_Release.exe %FUZZERTEST% && fuzzer_VS2010_%PLATFORM%_Release.exe %FUZZERTEST% && fuzzer_VS2012_%PLATFORM%_Release.exe %FUZZERTEST% && fuzzer_VS2013_%PLATFORM%_Release.exe %FUZZERTEST% && fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST% ) - version: 1.0.{build} environment: matrix: - COMPILER: "gcc" HOST: "mingw" PLATFORM: "x64" - SCRIPT: "make allarch" + SCRIPT: "make allzstd" - COMPILER: "gcc" HOST: "mingw" PLATFORM: "x86" - SCRIPT: "make allarch" + SCRIPT: "make allzstd" - COMPILER: "clang" HOST: "mingw" PLATFORM: "x64" - SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch" + SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allzstd" - COMPILER: "visual" HOST: "visual" PLATFORM: "x64" CONFIGURATION: "Debug" - COMPILER: "visual" HOST: "visual" PLATFORM: "Win32" CONFIGURATION: "Debug" - COMPILER: "visual" HOST: "visual" PLATFORM: "x64" CONFIGURATION: "Release" - COMPILER: "visual" HOST: "visual" PLATFORM: "Win32" CONFIGURATION: "Release" install: - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - SET PATH_ORIGINAL=%PATH% - if [%HOST%]==[mingw] ( SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe ) - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" ) build_script: - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% - if [%HOST%]==[mingw] ( ( if [%PLATFORM%]==[x64] ( SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" ) else if [%PLATFORM%]==[x86] ( SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" ) ) && make -v && sh -c "%COMPILER% -v" && set "CC=%COMPILER%" && sh -c "%SCRIPT%" ) - if [%HOST%]==[visual] ( ECHO *** && ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && ECHO *** && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ ) Index: vendor/zstd/dist/circle.yml 
=================================================================== --- vendor/zstd/dist/circle.yml (revision 325596) +++ vendor/zstd/dist/circle.yml (revision 325597) @@ -1,75 +1,80 @@ dependencies: override: - sudo dpkg --add-architecture i386 - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update - sudo apt-get -y install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - sudo apt-get -y install libstdc++-7-dev clang gcc g++ gcc-5 gcc-6 gcc-7 zlib1g-dev liblzma-dev - sudo apt-get -y install linux-libc-dev:i386 libc6-dev-i386 test: override: - ? | - if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then cc -v; make all && make clean; fi && + if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then cc -v; make all && make clean && make -C lib libzstd-nomt && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gnu90build && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make c99build && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gnu99build && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make c11build && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make cmakebuild && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make gppbuild && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc5build && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make gcc6build && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make clangbuild && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make m32build && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make armbuild && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make aarch64build && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make ppcbuild && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make ppc64build && make clean; fi && - if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc7build && make clean; fi #could add another test here + if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make gcc7build && make clean; fi : parallel: true - ? | if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make shortest && make clean; fi && if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then make -C tests test-legacy test-longmatch test-symbols && make clean; fi + : + parallel: true + - ? 
| + if [[ "$CIRCLE_NODE_INDEX" == "0" ]] ; then make -j regressiontest && make clean; fi && + if [[ "$CIRCLE_NODE_TOTAL" < "2" ]] || [[ "$CIRCLE_NODE_INDEX" == "1" ]]; then true; fi # Could add another test here : parallel: true post: - echo Circle CI tests finished # Longer tests #- make -C tests test-zstd-nolegacy && make clean #- pyenv global 3.4.4; make -C tests versionsTest && make clean #- make zlibwrapper && make clean #- gcc -v; make -C tests test32 MOREFLAGS="-I/usr/include/x86_64-linux-gnu" && make clean #- make uasan && make clean #- make asan32 && make clean #- make -C tests test32 CC=clang MOREFLAGS="-g -fsanitize=address -I/usr/include/x86_64-linux-gnu" # Valgrind tests #- CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make clean #- make -C tests valgrindTest && make clean # ARM, AArch64, PowerPC, PowerPC64 tests #- make ppctest && make clean #- make ppc64test && make clean #- make armtest && make clean #- make aarch64test && make clean Index: vendor/zstd/dist/contrib/gen_html/Makefile =================================================================== --- vendor/zstd/dist/contrib/gen_html/Makefile (revision 325596) +++ vendor/zstd/dist/contrib/gen_html/Makefile (revision 325597) @@ -1,51 +1,51 @@ -# ########################################################################## +# ################################################################ # Copyright (c) 2016-present, Facebook, Inc. # All rights reserved. # -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. -# ########################################################################## +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# ################################################################ CFLAGS ?= -O3 CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wstrict-aliasing=1 -Wswitch-enum -Wno-comment CFLAGS += $(MOREFLAGS) FLAGS = $(CPPFLAGS) $(CFLAGS) $(CXXFLAGS) $(LDFLAGS) ZSTDAPI = ../../lib/zstd.h ZSTDMANUAL = ../../doc/zstd_manual.html LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(ZSTDAPI)` LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(ZSTDAPI)` LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(ZSTDAPI)` LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) LIBVER := $(shell echo $(LIBVER_SCRIPT)) # Define *.exe as extension for Windows systems ifneq (,$(filter Windows%,$(OS))) EXT =.exe else EXT = endif .PHONY: default default: gen_html .PHONY: all all: manual gen_html: gen_html.cpp $(CXX) $(FLAGS) $^ -o $@$(EXT) $(ZSTDMANUAL): gen_html $(ZSTDAPI) echo "Update zstd manual in /doc" ./gen_html $(LIBVER) $(ZSTDAPI) $(ZSTDMANUAL) .PHONY: manual manual: gen_html $(ZSTDMANUAL) .PHONY: clean clean: @$(RM) gen_html$(EXT) @echo Cleaning completed Index: vendor/zstd/dist/contrib/gen_html/gen_html.cpp =================================================================== --- vendor/zstd/dist/contrib/gen_html/gen_html.cpp (revision 325596) +++ vendor/zstd/dist/contrib/gen_html/gen_html.cpp (revision 325597) @@ -1,224 +1,224 @@ /* * Copyright (c) 2016-present, Przemyslaw Skibinski, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
 */

#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>

using namespace std;

/* trim string at the beginning and at the end */
void trim(string& s, string characters)
{
    size_t p = s.find_first_not_of(characters);
    s.erase(0, p);

    p = s.find_last_not_of(characters);
    if (string::npos != p)
       s.erase(p+1);
}

/* trim C++ style comments */
void trim_comments(string &s)
{
    size_t spos, epos;

    spos = s.find("/*");
    epos = s.find("*/");
    s = s.substr(spos+3, epos-(spos+3));
}

/* get lines until a given terminator */
vector<string> get_lines(vector<string>& input, int& linenum, string terminator)
{
    vector<string> out;
    string line;
    size_t epos;

    while ((size_t)linenum < input.size()) {
        line = input[linenum];

        if (terminator.empty() && line.empty()) { linenum--; break; }

        epos = line.find(terminator);
        if (!terminator.empty() && epos!=string::npos) {
            out.push_back(line);
            break;
        }
        out.push_back(line);
        linenum++;
    }
    return out;
}

/* print line with ZSTDLIB_API removed and C++ comments not bold */
void print_line(stringstream &sout, string line)
{
    size_t spos;

    if (line.substr(0,12) == "ZSTDLIB_API ") line = line.substr(12);
    spos = line.find("/*");
    if (spos!=string::npos) {
        sout << line.substr(0, spos);
        sout << "<b>" << line.substr(spos) << "</b>" << endl;
    } else {
      //  fprintf(stderr, "lines=%s\n", line.c_str());
        sout << line << endl;
    }
}

int main(int argc, char *argv[]) {
    char exclam;
    int linenum, chapter = 1;
    vector<string> input, lines, comments, chapters;
    string line, version;
    size_t spos, l;
    stringstream sout;
    ifstream istream;
    ofstream ostream;

    if (argc < 4) {
        cout << "usage: " << argv[0] << " [zstd_version] [input_file] [output_html]" << endl;
        return 1;
    }

    version = "zstd " + string(argv[1]) + " Manual";

    istream.open(argv[2], ifstream::in);
    if (!istream.is_open()) {
        cout << "Error opening file " << argv[2] << endl;
        return 1;
    }

    ostream.open(argv[3], ifstream::out);
    if (!ostream.is_open()) {
        cout << "Error opening file " << argv[3] << endl;
        return 1;
    }

    while (getline(istream, line)) {
        input.push_back(line);
    }

    for (linenum=0; (size_t)linenum < input.size(); linenum++) {
        line = input[linenum];

        /* typedefs are detected and included even if uncommented */
        if (line.substr(0,7) == "typedef" && line.find("{")!=string::npos) {
            lines = get_lines(input, linenum, "}");
            sout << "<pre><b>";
            for (l=0; l<lines.size(); l++) {
                print_line(sout, lines[l]);
            }
            sout << "</b></pre><BR>" << endl;
            continue;
        }

        /* comments of type /**< and /*!< are detected and only function declaration is highlighted (bold) */
        if ((line.find("/**<")!=string::npos || line.find("/*!<")!=string::npos) && line.find("*/")!=string::npos) {
            sout << "<pre><b>";
            print_line(sout, line);
            sout << "</b></pre><BR>" << endl;
            continue;
        }

        spos = line.find("/**=");
        if (spos==string::npos) {
            spos = line.find("/*!");
            if (spos==string::npos)
                spos = line.find("/**");
            if (spos==string::npos)
                spos = line.find("/*-");
            if (spos==string::npos)
                spos = line.find("/*=");
            if (spos==string::npos)
                continue;
            exclam = line[spos+2];
        }
        else exclam = '=';

        comments = get_lines(input, linenum, "*/");
        if (!comments.empty()) comments[0] = line.substr(spos+3);
        if (!comments.empty()) comments[comments.size()-1] = comments[comments.size()-1].substr(0, comments[comments.size()-1].find("*/"));
        for (l=0; l<comments.size(); l++) {
            if (comments[l].find(" *")==0) comments[l] = comments[l].substr(2);
            else if (comments[l].find("  *")==0) comments[l] = comments[l].substr(3);
            trim(comments[l], "*-=");
        }
        while (!comments.empty() && comments[comments.size()-1].empty()) comments.pop_back();
        while (!comments.empty() && comments[0].empty()) comments.erase(comments.begin());

        /* comments of type /*! mean: this is a function declaration; switch comments with declarations */
        if (exclam == '!') {
            if (!comments.empty()) comments.erase(comments.begin());
            linenum++;
            lines = get_lines(input, linenum, "");

            sout << "<pre><b>";
            for (l=0; l<lines.size(); l++) {
                print_line(sout, lines[l]);
            }
            sout << "</b><p>";
            for (l=0; l<comments.size(); l++) {
                print_line(sout, comments[l]);
            }
            sout << "</p></pre><BR>" << endl << endl;
        } else if (exclam == '=') { /* comments of type /*= and /**= mean: use a <H3> header and show also all functions until first empty line */
            trim(comments[0], " ");
            sout << "<h3>" << comments[0] << "</h3><pre>";
            for (l=1; l<comments.size(); l++) {
                print_line(sout, comments[l]);
            }
            sout << "</pre><b><pre>";
            lines = get_lines(input, ++linenum, "");
            for (l=0; l<lines.size(); l++) {
                print_line(sout, lines[l]);
            }
            sout << "</pre></b><BR>" << endl;
        } else { /* comments of type /** and /*- mean: this is a comment; use a <H2> header for the first line */
            if (comments.empty()) continue;

            trim(comments[0], " ");
            sout << "<a name=\"Chapter" << chapter << "\"></a><h2>" << comments[0] << "</h2><pre>";
            chapters.push_back(comments[0]);
            chapter++;

            for (l=1; l<comments.size(); l++) {
                print_line(sout, comments[l]);
            }
            if (comments.size() > 1)
                sout << "</pre><BR>" << endl << endl;
            else
                sout << "</pre>" << endl << endl;
        }
    }

    ostream << "<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=ISO-8859-1\">\n<title>" << version << "</title>\n</head>\n<body>" << endl;
    ostream << "<h1>" << version << "</h1>\n";

    ostream << "<hr>\n<a name=\"Contents\"></a><h2>Contents</h2>\n<ol>\n";
    for (size_t i=0; i<chapters.size(); i++)
        ostream << "<li><a href=\"#Chapter" << i+1 << "\">" << chapters[i].c_str() << "</a></li>\n";
    ostream << "</ol>\n<hr>\n";

    ostream << sout.str();
    ostream << "</html>" << endl << "</body>" << endl;

    return 0;
}
Index: vendor/zstd/dist/contrib/pzstd/ErrorHolder.h
===================================================================
--- vendor/zstd/dist/contrib/pzstd/ErrorHolder.h (revision 325596)
+++ vendor/zstd/dist/contrib/pzstd/ErrorHolder.h (revision 325597)
@@ -1,54 +1,54 @@
-/**
+/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 */
#pragma once

#include <atomic>
#include <cassert>
#include <stdexcept>
#include <string>

namespace pzstd {

// Coordinates graceful shutdown of the pzstd pipeline
class ErrorHolder {
  std::atomic<bool> error_;
  std::string message_;

 public:
  ErrorHolder() : error_(false) {}

  bool hasError() noexcept {
    return error_.load();
  }

  void setError(std::string message) noexcept {
    // Given multiple possibly concurrent calls, exactly one will ever succeed.
    bool expected = false;
    if (error_.compare_exchange_strong(expected, true)) {
      message_ = std::move(message);
    }
  }

  bool check(bool predicate, std::string message) noexcept {
    if (!predicate) {
      setError(std::move(message));
    }
    return !hasError();
  }

  std::string getError() noexcept {
    error_.store(false);
    return std::move(message_);
  }

  ~ErrorHolder() {
    assert(!hasError());
  }
};
}
Index: vendor/zstd/dist/contrib/pzstd/Logging.h
===================================================================
--- vendor/zstd/dist/contrib/pzstd/Logging.h (revision 325596)
+++ vendor/zstd/dist/contrib/pzstd/Logging.h (revision 325597)
@@ -1,72 +1,72 @@
-/**
+/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 */
#pragma once

#include <cstdio>
#include <mutex>

namespace pzstd {

constexpr int ERROR = 1;
constexpr int INFO = 2;
constexpr int DEBUG = 3;
constexpr int VERBOSE = 4;

class Logger {
  std::mutex mutex_;
  FILE* out_;
  const int level_;

  using Clock = std::chrono::system_clock;
  Clock::time_point lastUpdate_;
  std::chrono::milliseconds refreshRate_;

 public:
  explicit Logger(int level, FILE* out = stderr)
      : out_(out), level_(level), lastUpdate_(Clock::now()), refreshRate_(150) {}

  bool logsAt(int level) {
    return level <= level_;
  }

  template <typename... Args>
  void operator()(int level, const char *fmt, Args... args) {
    if (level > level_) {
      return;
    }
    std::lock_guard<std::mutex> lock(mutex_);
    std::fprintf(out_, fmt, args...);
  }

  template <typename... Args>
  void update(int level, const char *fmt, Args... args) {
    if (level > level_) {
      return;
    }
    std::lock_guard<std::mutex> lock(mutex_);
    auto now = Clock::now();
    if (now - lastUpdate_ > refreshRate_) {
      lastUpdate_ = now;
      std::fprintf(out_, "\r");
      std::fprintf(out_, fmt, args...);
    }
  }

  void clear(int level) {
    if (level > level_) {
      return;
    }
    std::lock_guard<std::mutex> lock(mutex_);
    std::fprintf(out_, "\r%79s\r", "");
  }
};
}
Index: vendor/zstd/dist/contrib/pzstd/Makefile =================================================================== --- vendor/zstd/dist/contrib/pzstd/Makefile (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/Makefile (revision 325597) @@ -1,269 +1,269 @@ -# ########################################################################## +# ################################################################ # Copyright (c) 2016-present, Facebook, Inc. # All rights reserved. # -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. -# ########################################################################## +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ # Standard variables for installation DESTDIR ?= PREFIX ?= /usr/local BINDIR := $(DESTDIR)$(PREFIX)/bin ZSTDDIR = ../../lib PROGDIR = ../../programs # External program to use to run tests, e.g. qemu or valgrind TESTPROG ?= # Flags to pass to the tests TESTFLAGS ?= # We use gcc/clang to generate the header dependencies of files DEPFLAGS = -MMD -MP -MF $*.Td POSTCOMPILE = mv -f $*.Td $*.d # CFLAGS, CXXFLAGS, CPPFLAGS, and LDFLAGS are for the users to override CFLAGS ?= -O3 -Wall -Wextra CXXFLAGS ?= -O3 -Wall -Wextra -pedantic CPPFLAGS ?= LDFLAGS ?= # Include flags PZSTD_INC = -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(PROGDIR) -I. GTEST_INC = -isystem googletest/googletest/include PZSTD_CPPFLAGS = $(PZSTD_INC) PZSTD_CCXXFLAGS = PZSTD_CFLAGS = $(PZSTD_CCXXFLAGS) PZSTD_CXXFLAGS = $(PZSTD_CCXXFLAGS) -std=c++11 PZSTD_LDFLAGS = EXTRA_FLAGS = ALL_CFLAGS = $(EXTRA_FLAGS) $(CPPFLAGS) $(PZSTD_CPPFLAGS) $(CFLAGS) $(PZSTD_CFLAGS) ALL_CXXFLAGS = $(EXTRA_FLAGS) $(CPPFLAGS) $(PZSTD_CPPFLAGS) $(CXXFLAGS) $(PZSTD_CXXFLAGS) ALL_LDFLAGS = $(EXTRA_FLAGS) $(LDFLAGS) $(PZSTD_LDFLAGS) # gtest libraries need to go before "-lpthread" because they depend on it.
GTEST_LIB = -L googletest/build/googlemock/gtest LIBS = # Compilation commands LD_COMMAND = $(CXX) $^ $(ALL_LDFLAGS) $(LIBS) -lpthread -o $@ CC_COMMAND = $(CC) $(DEPFLAGS) $(ALL_CFLAGS) -c $< -o $@ CXX_COMMAND = $(CXX) $(DEPFLAGS) $(ALL_CXXFLAGS) -c $< -o $@ # Get a list of all zstd files so we rebuild the static library when we need to ZSTDCOMMON_FILES := $(wildcard $(ZSTDDIR)/common/*.c) \ $(wildcard $(ZSTDDIR)/common/*.h) ZSTDCOMP_FILES := $(wildcard $(ZSTDDIR)/compress/*.c) \ $(wildcard $(ZSTDDIR)/compress/*.h) ZSTDDECOMP_FILES := $(wildcard $(ZSTDDIR)/decompress/*.c) \ $(wildcard $(ZSTDDIR)/decompress/*.h) ZSTDPROG_FILES := $(wildcard $(PROGDIR)/*.c) \ $(wildcard $(PROGDIR)/*.h) ZSTD_FILES := $(wildcard $(ZSTDDIR)/*.h) \ $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) \ $(ZSTDPROG_FILES) # List all the pzstd source files so we can determine their dependencies PZSTD_SRCS := $(wildcard *.cpp) PZSTD_TESTS := $(wildcard test/*.cpp) UTILS_TESTS := $(wildcard utils/test/*.cpp) ALL_SRCS := $(PZSTD_SRCS) $(PZSTD_TESTS) $(UTILS_TESTS) # Define *.exe as extension for Windows systems ifneq (,$(filter Windows%,$(OS))) EXT =.exe else EXT = endif # Standard targets .PHONY: default default: all .PHONY: test-pzstd test-pzstd: TESTFLAGS=--gtest_filter=-*ExtremelyLarge* test-pzstd: clean googletest pzstd tests check .PHONY: test-pzstd32 test-pzstd32: clean googletest32 all32 check .PHONY: test-pzstd-tsan test-pzstd-tsan: LDFLAGS=-fuse-ld=gold test-pzstd-tsan: TESTFLAGS=--gtest_filter=-*ExtremelyLarge* test-pzstd-tsan: clean googletest tsan check .PHONY: test-pzstd-asan test-pzstd-asan: LDFLAGS=-fuse-ld=gold test-pzstd-asan: TESTFLAGS=--gtest_filter=-*ExtremelyLarge* test-pzstd-asan: clean asan check .PHONY: check check: $(TESTPROG) ./utils/test/BufferTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./utils/test/RangeTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./utils/test/ResourcePoolTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./utils/test/ScopeGuardTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./utils/test/ThreadPoolTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./utils/test/WorkQueueTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./test/OptionsTest$(EXT) $(TESTFLAGS) $(TESTPROG) ./test/PzstdTest$(EXT) $(TESTFLAGS) .PHONY: install install: PZSTD_CPPFLAGS += -DNDEBUG install: pzstd$(EXT) install -d -m 755 $(BINDIR)/ install -m 755 pzstd$(EXT) $(BINDIR)/pzstd$(EXT) .PHONY: uninstall uninstall: $(RM) $(BINDIR)/pzstd$(EXT) # Targets for many different builds .PHONY: all all: PZSTD_CPPFLAGS += -DNDEBUG all: pzstd$(EXT) .PHONY: debug debug: EXTRA_FLAGS += -g debug: pzstd$(EXT) tests roundtrip .PHONY: tsan tsan: PZSTD_CCXXFLAGS += -fsanitize=thread -fPIC tsan: PZSTD_LDFLAGS += -fsanitize=thread tsan: debug .PHONY: asan asan: EXTRA_FLAGS += -fsanitize=address asan: debug .PHONY: ubsan ubsan: EXTRA_FLAGS += -fsanitize=undefined ubsan: debug .PHONY: all32 all32: EXTRA_FLAGS += -m32 all32: all tests roundtrip .PHONY: debug32 debug32: EXTRA_FLAGS += -m32 debug32: debug .PHONY: asan32 asan32: EXTRA_FLAGS += -m32 asan32: asan .PHONY: tsan32 tsan32: EXTRA_FLAGS += -m32 tsan32: tsan .PHONY: ubsan32 ubsan32: EXTRA_FLAGS += -m32 ubsan32: ubsan # Run long round trip tests .PHONY: roundtripcheck roundtripcheck: roundtrip check $(TESTPROG) ./test/RoundTripTest$(EXT) $(TESTFLAGS) # Build the main binary pzstd$(EXT): main.o Options.o Pzstd.o SkippableFrame.o $(ZSTDDIR)/libzstd.a $(LD_COMMAND) # Target that depends on all the tests .PHONY: tests tests: EXTRA_FLAGS += -Wno-deprecated-declarations tests: $(patsubst %,%$(EXT),$(basename $(PZSTD_TESTS) $(UTILS_TESTS))) # Build 
the round trip tests .PHONY: roundtrip roundtrip: EXTRA_FLAGS += -Wno-deprecated-declarations roundtrip: test/RoundTripTest$(EXT) # Use the static library that zstd builds for simplicity and # so we get the compiler options correct $(ZSTDDIR)/libzstd.a: $(ZSTD_FILES) CFLAGS="$(ALL_CFLAGS)" LDFLAGS="$(ALL_LDFLAGS)" $(MAKE) -C $(ZSTDDIR) libzstd.a # Rules to build the tests test/RoundTripTest$(EXT): test/RoundTripTest.o $(PROGDIR)/datagen.o Options.o \ Pzstd.o SkippableFrame.o $(ZSTDDIR)/libzstd.a $(LD_COMMAND) test/%Test$(EXT): PZSTD_LDFLAGS += $(GTEST_LIB) test/%Test$(EXT): LIBS += -lgtest -lgtest_main test/%Test$(EXT): test/%Test.o $(PROGDIR)/datagen.o Options.o Pzstd.o \ SkippableFrame.o $(ZSTDDIR)/libzstd.a $(LD_COMMAND) utils/test/%Test$(EXT): PZSTD_LDFLAGS += $(GTEST_LIB) utils/test/%Test$(EXT): LIBS += -lgtest -lgtest_main utils/test/%Test$(EXT): utils/test/%Test.o $(LD_COMMAND) GTEST_CMAKEFLAGS = # Install googletest .PHONY: googletest googletest: PZSTD_CCXXFLAGS += -fPIC googletest: @$(RM) -rf googletest @git clone https://github.com/google/googletest @mkdir -p googletest/build @cd googletest/build && cmake $(GTEST_CMAKEFLAGS) -DCMAKE_CXX_FLAGS="$(ALL_CXXFLAGS)" .. && $(MAKE) .PHONY: googletest32 googletest32: PZSTD_CCXXFLAGS += -m32 googletest32: googletest .PHONY: googletest-mingw64 googletest-mingw64: GTEST_CMAKEFLAGS += -G "MSYS Makefiles" googletest-mingw64: googletest .PHONY: clean clean: $(RM) -f *.o pzstd$(EXT) *.Td *.d $(RM) -f test/*.o test/*Test$(EXT) test/*.Td test/*.d $(RM) -f utils/test/*.o utils/test/*Test$(EXT) utils/test/*.Td utils/test/*.d $(RM) -f $(PROGDIR)/*.o $(PROGDIR)/*.Td $(PROGDIR)/*.d $(MAKE) -C $(ZSTDDIR) clean @echo Cleaning completed # Cancel implicit rules %.o: %.c %.o: %.cpp # Object file rules %.o: %.c $(CC_COMMAND) $(POSTCOMPILE) $(PROGDIR)/%.o: $(PROGDIR)/%.c $(CC_COMMAND) $(POSTCOMPILE) %.o: %.cpp $(CXX_COMMAND) $(POSTCOMPILE) test/%.o: PZSTD_CPPFLAGS += $(GTEST_INC) test/%.o: test/%.cpp $(CXX_COMMAND) $(POSTCOMPILE) utils/test/%.o: PZSTD_CPPFLAGS += $(GTEST_INC) utils/test/%.o: utils/test/%.cpp $(CXX_COMMAND) $(POSTCOMPILE) # Dependency file stuff .PRECIOUS: %.d test/%.d utils/test/%.d # Include rules that specify header file dependencies -include $(patsubst %,%.d,$(basename $(ALL_SRCS))) Index: vendor/zstd/dist/contrib/pzstd/Options.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/Options.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/Options.cpp (revision 325597) @@ -1,439 +1,439 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #include "Options.h" #include "util.h" #include "utils/ScopeGuard.h" #include #include #include #include #include #include #include #if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) || \ defined(__CYGWIN__) #include /* _isatty */ #define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream)) #elif defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) || defined(_POSIX_SOURCE) || (defined(__APPLE__) && defined(__MACH__)) || \ defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* https://sourceforge.net/p/predef/wiki/OperatingSystems/ */ #include /* isatty */ #define IS_CONSOLE(stdStream) isatty(fileno(stdStream)) #else #define IS_CONSOLE(stdStream) 0 #endif namespace pzstd { namespace { unsigned defaultNumThreads() { #ifdef PZSTD_NUM_THREADS return PZSTD_NUM_THREADS; #else return std::thread::hardware_concurrency(); #endif } unsigned parseUnsigned(const char **arg) { unsigned result = 0; while (**arg >= '0' && **arg <= '9') { result *= 10; result += **arg - '0'; ++(*arg); } return result; } const char *getArgument(const char *options, const char **argv, int &i, int argc) { if (options[1] != 0) { return options + 1; } ++i; if (i == argc) { std::fprintf(stderr, "Option -%c requires an argument, but none provided\n", *options); return nullptr; } return argv[i]; } const std::string kZstdExtension = ".zst"; constexpr char kStdIn[] = "-"; constexpr char kStdOut[] = "-"; constexpr unsigned kDefaultCompressionLevel = 3; constexpr unsigned kMaxNonUltraCompressionLevel = 19; #ifdef _WIN32 const char nullOutput[] = "nul"; #else const char nullOutput[] = "/dev/null"; #endif void notSupported(const char *option) { std::fprintf(stderr, "Operation not supported: %s\n", option); } void usage() { std::fprintf(stderr, "Usage:\n"); std::fprintf(stderr, " pzstd [args] [FILE(s)]\n"); std::fprintf(stderr, "Parallel ZSTD options:\n"); std::fprintf(stderr, " -p, --processes # : number of threads to use for (de)compression (default:%d)\n", defaultNumThreads()); std::fprintf(stderr, "ZSTD options:\n"); std::fprintf(stderr, " -# : # compression level (1-%d, default:%d)\n", kMaxNonUltraCompressionLevel, kDefaultCompressionLevel); std::fprintf(stderr, " -d, --decompress : decompression\n"); std::fprintf(stderr, " -o file : result stored into `file` (only if 1 input file)\n"); std::fprintf(stderr, " -f, --force : overwrite output without prompting, (de)compress links\n"); std::fprintf(stderr, " --rm : remove source file(s) after successful (de)compression\n"); std::fprintf(stderr, " -k, --keep : preserve source file(s) (default)\n"); std::fprintf(stderr, " -h, --help : display help and exit\n"); std::fprintf(stderr, " -V, --version : display version number and exit\n"); std::fprintf(stderr, " -v, --verbose : verbose mode; specify multiple times to increase log level (default:2)\n"); std::fprintf(stderr, " -q, --quiet : suppress warnings; specify twice to suppress errors too\n"); std::fprintf(stderr, " -c, --stdout : force write to standard output, even if it is the console\n"); #ifdef UTIL_HAS_CREATEFILELIST std::fprintf(stderr, " -r : operate recursively on directories\n"); #endif std::fprintf(stderr, " --ultra : enable levels beyond %i, up to %i (requires more memory)\n", kMaxNonUltraCompressionLevel, ZSTD_maxCLevel()); std::fprintf(stderr, " -C, --check : integrity check (default)\n"); std::fprintf(stderr, " --no-check : no integrity check\n"); std::fprintf(stderr, " -t, --test : test compressed file integrity\n"); std::fprintf(stderr, " -- : all arguments 
after \"--\" are treated as files\n"); } } // anonymous namespace Options::Options() : numThreads(defaultNumThreads()), maxWindowLog(23), compressionLevel(kDefaultCompressionLevel), decompress(false), overwrite(false), keepSource(true), writeMode(WriteMode::Auto), checksum(true), verbosity(2) {} Options::Status Options::parse(int argc, const char **argv) { bool test = false; bool recursive = false; bool ultra = false; bool forceStdout = false; bool followLinks = false; // Local copy of input files, which are pointers into argv. std::vector localInputFiles; for (int i = 1; i < argc; ++i) { const char *arg = argv[i]; // Protect against empty arguments if (arg[0] == 0) { continue; } // Everything after "--" is an input file if (!std::strcmp(arg, "--")) { ++i; std::copy(argv + i, argv + argc, std::back_inserter(localInputFiles)); break; } // Long arguments that don't have a short option { bool isLongOption = true; if (!std::strcmp(arg, "--rm")) { keepSource = false; } else if (!std::strcmp(arg, "--ultra")) { ultra = true; maxWindowLog = 0; } else if (!std::strcmp(arg, "--no-check")) { checksum = false; } else if (!std::strcmp(arg, "--sparse")) { writeMode = WriteMode::Sparse; notSupported("Sparse mode"); return Status::Failure; } else if (!std::strcmp(arg, "--no-sparse")) { writeMode = WriteMode::Regular; notSupported("Sparse mode"); return Status::Failure; } else if (!std::strcmp(arg, "--dictID")) { notSupported(arg); return Status::Failure; } else if (!std::strcmp(arg, "--no-dictID")) { notSupported(arg); return Status::Failure; } else { isLongOption = false; } if (isLongOption) { continue; } } // Arguments with a short option simply set their short option. const char *options = nullptr; if (!std::strcmp(arg, "--processes")) { options = "p"; } else if (!std::strcmp(arg, "--version")) { options = "V"; } else if (!std::strcmp(arg, "--help")) { options = "h"; } else if (!std::strcmp(arg, "--decompress")) { options = "d"; } else if (!std::strcmp(arg, "--force")) { options = "f"; } else if (!std::strcmp(arg, "--stdout")) { options = "c"; } else if (!std::strcmp(arg, "--keep")) { options = "k"; } else if (!std::strcmp(arg, "--verbose")) { options = "v"; } else if (!std::strcmp(arg, "--quiet")) { options = "q"; } else if (!std::strcmp(arg, "--check")) { options = "C"; } else if (!std::strcmp(arg, "--test")) { options = "t"; } else if (arg[0] == '-' && arg[1] != 0) { options = arg + 1; } else { localInputFiles.emplace_back(arg); continue; } assert(options != nullptr); bool finished = false; while (!finished && *options != 0) { // Parse the compression level if (*options >= '0' && *options <= '9') { compressionLevel = parseUnsigned(&options); continue; } switch (*options) { case 'h': case 'H': usage(); return Status::Message; case 'V': std::fprintf(stderr, "PZSTD version: %s.\n", ZSTD_VERSION_STRING); return Status::Message; case 'p': { finished = true; const char *optionArgument = getArgument(options, argv, i, argc); if (optionArgument == nullptr) { return Status::Failure; } if (*optionArgument < '0' || *optionArgument > '9') { std::fprintf(stderr, "Option -p expects a number, but %s provided\n", optionArgument); return Status::Failure; } numThreads = parseUnsigned(&optionArgument); if (*optionArgument != 0) { std::fprintf(stderr, "Option -p expects a number, but %u%s provided\n", numThreads, optionArgument); return Status::Failure; } break; } case 'o': { finished = true; const char *optionArgument = getArgument(options, argv, i, argc); if (optionArgument == nullptr) { return Status::Failure; } 
outputFile = optionArgument; break; } case 'C': checksum = true; break; case 'k': keepSource = true; break; case 'd': decompress = true; break; case 'f': overwrite = true; forceStdout = true; followLinks = true; break; case 't': test = true; decompress = true; break; #ifdef UTIL_HAS_CREATEFILELIST case 'r': recursive = true; break; #endif case 'c': outputFile = kStdOut; forceStdout = true; break; case 'v': ++verbosity; break; case 'q': --verbosity; // Ignore them for now break; // Unsupported options from Zstd case 'D': case 's': notSupported("Zstd dictionaries."); return Status::Failure; case 'b': case 'e': case 'i': case 'B': notSupported("Zstd benchmarking options."); return Status::Failure; default: std::fprintf(stderr, "Invalid argument: %s\n", arg); return Status::Failure; } if (!finished) { ++options; } } // while (*options != 0); } // for (int i = 1; i < argc; ++i); // Set options for test mode if (test) { outputFile = nullOutput; keepSource = true; } // Input file defaults to standard input if not provided. if (localInputFiles.empty()) { localInputFiles.emplace_back(kStdIn); } // Check validity of input files if (localInputFiles.size() > 1) { const auto it = std::find(localInputFiles.begin(), localInputFiles.end(), std::string{kStdIn}); if (it != localInputFiles.end()) { std::fprintf( stderr, "Cannot specify standard input when handling multiple files\n"); return Status::Failure; } } if (localInputFiles.size() > 1 || recursive) { if (!outputFile.empty() && outputFile != nullOutput) { std::fprintf( stderr, "Cannot specify an output file when handling multiple inputs\n"); return Status::Failure; } } g_utilDisplayLevel = verbosity; // Remove local input files that are symbolic links if (!followLinks) { std::remove_if(localInputFiles.begin(), localInputFiles.end(), [&](const char *path) { bool isLink = UTIL_isLink(path); if (isLink && verbosity >= 2) { std::fprintf( stderr, "Warning : %s is symbolic link, ignoring\n", path); } return isLink; }); } // Translate input files/directories into files to (de)compress if (recursive) { char *scratchBuffer = nullptr; unsigned numFiles = 0; const char **files = UTIL_createFileList(localInputFiles.data(), localInputFiles.size(), &scratchBuffer, &numFiles, followLinks); if (files == nullptr) { std::fprintf(stderr, "Error traversing directories\n"); return Status::Failure; } auto guard = makeScopeGuard([&] { UTIL_freeFileList(files, scratchBuffer); }); if (numFiles == 0) { std::fprintf(stderr, "No files found\n"); return Status::Failure; } inputFiles.resize(numFiles); std::copy(files, files + numFiles, inputFiles.begin()); } else { inputFiles.resize(localInputFiles.size()); std::copy(localInputFiles.begin(), localInputFiles.end(), inputFiles.begin()); } localInputFiles.clear(); assert(!inputFiles.empty()); // If reading from standard input, default to standard output if (inputFiles[0] == kStdIn && outputFile.empty()) { assert(inputFiles.size() == 1); outputFile = "-"; } if (inputFiles[0] == kStdIn && IS_CONSOLE(stdin)) { assert(inputFiles.size() == 1); std::fprintf(stderr, "Cannot read input from interactive console\n"); return Status::Failure; } if (outputFile == "-" && IS_CONSOLE(stdout) && !(forceStdout && decompress)) { std::fprintf(stderr, "Will not write to console stdout unless -c or -f is " "specified and decompressing\n"); return Status::Failure; } // Check compression level { unsigned maxCLevel = ultra ? 
ZSTD_maxCLevel() : kMaxNonUltraCompressionLevel; if (compressionLevel > maxCLevel || compressionLevel == 0) { std::fprintf(stderr, "Invalid compression level %u.\n", compressionLevel); return Status::Failure; } } // Check that numThreads is set if (numThreads == 0) { std::fprintf(stderr, "Invalid arguments: # of threads not specified " "and unable to determine hardware concurrency.\n"); return Status::Failure; } // Modify verbosity // If we are piping input and output, turn off interaction if (inputFiles[0] == kStdIn && outputFile == kStdOut && verbosity == 2) { verbosity = 1; } // If we are in multi-file mode, turn off interaction if (inputFiles.size() > 1 && verbosity == 2) { verbosity = 1; } return Status::Success; } std::string Options::getOutputFile(const std::string &inputFile) const { if (!outputFile.empty()) { return outputFile; } // Attempt to add/remove zstd extension from the input file if (decompress) { int stemSize = inputFile.size() - kZstdExtension.size(); if (stemSize > 0 && inputFile.substr(stemSize) == kZstdExtension) { return inputFile.substr(0, stemSize); } else { return ""; } } else { return inputFile + kZstdExtension; } } } Index: vendor/zstd/dist/contrib/pzstd/Options.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/Options.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/Options.h (revision 325597) @@ -1,68 +1,68 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" #undef ZSTD_STATIC_LINKING_ONLY #include #include #include namespace pzstd { struct Options { enum class WriteMode { Regular, Auto, Sparse }; unsigned numThreads; unsigned maxWindowLog; unsigned compressionLevel; bool decompress; std::vector inputFiles; std::string outputFile; bool overwrite; bool keepSource; WriteMode writeMode; bool checksum; int verbosity; enum class Status { Success, // Successfully parsed options Failure, // Failure to parse options Message // Options specified to print a message (e.g. 
"-h") }; Options(); Options(unsigned numThreads, unsigned maxWindowLog, unsigned compressionLevel, bool decompress, std::vector inputFiles, std::string outputFile, bool overwrite, bool keepSource, WriteMode writeMode, bool checksum, int verbosity) : numThreads(numThreads), maxWindowLog(maxWindowLog), compressionLevel(compressionLevel), decompress(decompress), inputFiles(std::move(inputFiles)), outputFile(std::move(outputFile)), overwrite(overwrite), keepSource(keepSource), writeMode(writeMode), checksum(checksum), verbosity(verbosity) {} Status parse(int argc, const char **argv); ZSTD_parameters determineParameters() const { ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, 0); params.fParams.contentSizeFlag = 0; params.fParams.checksumFlag = checksum; if (maxWindowLog != 0 && params.cParams.windowLog > maxWindowLog) { params.cParams.windowLog = maxWindowLog; params.cParams = ZSTD_adjustCParams(params.cParams, 0, 0); } return params; } std::string getOutputFile(const std::string &inputFile) const; }; } Index: vendor/zstd/dist/contrib/pzstd/Pzstd.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/Pzstd.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/Pzstd.cpp (revision 325597) @@ -1,618 +1,618 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #include "Pzstd.h" #include "SkippableFrame.h" #include "utils/FileSystem.h" #include "utils/Range.h" #include "utils/ScopeGuard.h" #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" #include #include #include #include #include #include #if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) || defined(__CYGWIN__) # include /* _O_BINARY */ # include /* _setmode, _isatty */ # define SET_BINARY_MODE(file) { if (_setmode(_fileno(file), _O_BINARY) == -1) perror("Cannot set _O_BINARY"); } #else # include /* isatty */ # define SET_BINARY_MODE(file) #endif namespace pzstd { namespace { #ifdef _WIN32 const std::string nullOutput = "nul"; #else const std::string nullOutput = "/dev/null"; #endif } using std::size_t; static std::uintmax_t fileSizeOrZero(const std::string &file) { if (file == "-") { return 0; } std::error_code ec; auto size = file_size(file, ec); if (ec) { size = 0; } return size; } static std::uint64_t handleOneInput(const Options &options, const std::string &inputFile, FILE* inputFd, const std::string &outputFile, FILE* outputFd, SharedState& state) { auto inputSize = fileSizeOrZero(inputFile); // WorkQueue outlives ThreadPool so in the case of error we are certain // we don't accidently try to call push() on it after it is destroyed WorkQueue> outs{options.numThreads + 1}; std::uint64_t bytesRead; std::uint64_t bytesWritten; { // Initialize the (de)compression thread pool with numThreads ThreadPool executor(options.numThreads); // Run the reader thread on an extra thread ThreadPool readExecutor(1); if (!options.decompress) { // Add a job that reads the input and starts all the compression jobs readExecutor.add( [&state, &outs, &executor, inputFd, inputSize, &options, &bytesRead] { bytesRead = asyncCompressChunks( state, outs, executor, inputFd, inputSize, options.numThreads, options.determineParameters()); }); // Start writing bytesWritten = writeFile(state, outs, outputFd, options.decompress); } else { // Add a job that reads the input and starts all the decompression jobs readExecutor.add([&state, &outs, &executor, inputFd, &bytesRead] { bytesRead = asyncDecompressFrames(state, outs, executor, inputFd); }); // Start writing bytesWritten = writeFile(state, outs, outputFd, options.decompress); } } if (!state.errorHolder.hasError()) { std::string inputFileName = inputFile == "-" ? "stdin" : inputFile; std::string outputFileName = outputFile == "-" ? 
"stdout" : outputFile; if (!options.decompress) { double ratio = static_cast(bytesWritten) / static_cast(bytesRead + !bytesRead); state.log(INFO, "%-20s :%6.2f%% (%6" PRIu64 " => %6" PRIu64 " bytes, %s)\n", inputFileName.c_str(), ratio * 100, bytesRead, bytesWritten, outputFileName.c_str()); } else { state.log(INFO, "%-20s: %" PRIu64 " bytes \n", inputFileName.c_str(),bytesWritten); } } return bytesWritten; } static FILE *openInputFile(const std::string &inputFile, ErrorHolder &errorHolder) { if (inputFile == "-") { SET_BINARY_MODE(stdin); return stdin; } // Check if input file is a directory { std::error_code ec; if (is_directory(inputFile, ec)) { errorHolder.setError("Output file is a directory -- ignored"); return nullptr; } } auto inputFd = std::fopen(inputFile.c_str(), "rb"); if (!errorHolder.check(inputFd != nullptr, "Failed to open input file")) { return nullptr; } return inputFd; } static FILE *openOutputFile(const Options &options, const std::string &outputFile, SharedState& state) { if (outputFile == "-") { SET_BINARY_MODE(stdout); return stdout; } // Check if the output file exists and then open it if (!options.overwrite && outputFile != nullOutput) { auto outputFd = std::fopen(outputFile.c_str(), "rb"); if (outputFd != nullptr) { std::fclose(outputFd); if (!state.log.logsAt(INFO)) { state.errorHolder.setError("Output file exists"); return nullptr; } state.log( INFO, "pzstd: %s already exists; do you wish to overwrite (y/n) ? ", outputFile.c_str()); int c = getchar(); if (c != 'y' && c != 'Y') { state.errorHolder.setError("Not overwritten"); return nullptr; } } } auto outputFd = std::fopen(outputFile.c_str(), "wb"); if (!state.errorHolder.check( outputFd != nullptr, "Failed to open output file")) { return nullptr; } return outputFd; } int pzstdMain(const Options &options) { int returnCode = 0; SharedState state(options); for (const auto& input : options.inputFiles) { // Setup the shared state auto printErrorGuard = makeScopeGuard([&] { if (state.errorHolder.hasError()) { returnCode = 1; state.log(ERROR, "pzstd: %s: %s.\n", input.c_str(), state.errorHolder.getError().c_str()); } }); // Open the input file auto inputFd = openInputFile(input, state.errorHolder); if (inputFd == nullptr) { continue; } auto closeInputGuard = makeScopeGuard([&] { std::fclose(inputFd); }); // Open the output file auto outputFile = options.getOutputFile(input); if (!state.errorHolder.check(outputFile != "", "Input file does not have extension .zst")) { continue; } auto outputFd = openOutputFile(options, outputFile, state); if (outputFd == nullptr) { continue; } auto closeOutputGuard = makeScopeGuard([&] { std::fclose(outputFd); }); // (de)compress the file handleOneInput(options, input, inputFd, outputFile, outputFd, state); if (state.errorHolder.hasError()) { continue; } // Delete the input file if necessary if (!options.keepSource) { // Be sure that we are done and have written everything before we delete if (!state.errorHolder.check(std::fclose(inputFd) == 0, "Failed to close input file")) { continue; } closeInputGuard.dismiss(); if (!state.errorHolder.check(std::fclose(outputFd) == 0, "Failed to close output file")) { continue; } closeOutputGuard.dismiss(); if (std::remove(input.c_str()) != 0) { state.errorHolder.setError("Failed to remove input file"); continue; } } } // Returns 1 if any of the files failed to (de)compress. return returnCode; } /// Construct a `ZSTD_inBuffer` that points to the data in `buffer`. 
static ZSTD_inBuffer makeZstdInBuffer(const Buffer& buffer) { return ZSTD_inBuffer{buffer.data(), buffer.size(), 0}; } /** * Advance `buffer` and `inBuffer` by the amount of data read, as indicated by * `inBuffer.pos`. */ void advance(Buffer& buffer, ZSTD_inBuffer& inBuffer) { auto pos = inBuffer.pos; inBuffer.src = static_cast(inBuffer.src) + pos; inBuffer.size -= pos; inBuffer.pos = 0; return buffer.advance(pos); } /// Construct a `ZSTD_outBuffer` that points to the data in `buffer`. static ZSTD_outBuffer makeZstdOutBuffer(Buffer& buffer) { return ZSTD_outBuffer{buffer.data(), buffer.size(), 0}; } /** * Split `buffer` and advance `outBuffer` by the amount of data written, as * indicated by `outBuffer.pos`. */ Buffer split(Buffer& buffer, ZSTD_outBuffer& outBuffer) { auto pos = outBuffer.pos; outBuffer.dst = static_cast(outBuffer.dst) + pos; outBuffer.size -= pos; outBuffer.pos = 0; return buffer.splitAt(pos); } /** * Stream chunks of input from `in`, compress it, and stream it out to `out`. * * @param state The shared state * @param in Queue that we `pop()` input buffers from * @param out Queue that we `push()` compressed output buffers to * @param maxInputSize An upper bound on the size of the input */ static void compress( SharedState& state, std::shared_ptr in, std::shared_ptr out, size_t maxInputSize) { auto& errorHolder = state.errorHolder; auto guard = makeScopeGuard([&] { out->finish(); }); // Initialize the CCtx auto ctx = state.cStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_CStream")) { return; } { auto err = ZSTD_resetCStream(ctx.get(), 0); if (!errorHolder.check(!ZSTD_isError(err), ZSTD_getErrorName(err))) { return; } } // Allocate space for the result auto outBuffer = Buffer(ZSTD_compressBound(maxInputSize)); auto zstdOutBuffer = makeZstdOutBuffer(outBuffer); { Buffer inBuffer; // Read a buffer in from the input queue while (in->pop(inBuffer) && !errorHolder.hasError()) { auto zstdInBuffer = makeZstdInBuffer(inBuffer); // Compress the whole buffer and send it to the output queue while (!inBuffer.empty() && !errorHolder.hasError()) { if (!errorHolder.check( !outBuffer.empty(), "ZSTD_compressBound() was too small")) { return; } // Compress auto err = ZSTD_compressStream(ctx.get(), &zstdOutBuffer, &zstdInBuffer); if (!errorHolder.check(!ZSTD_isError(err), ZSTD_getErrorName(err))) { return; } // Split the compressed data off outBuffer and pass to the output queue out->push(split(outBuffer, zstdOutBuffer)); // Forget about the data we already compressed advance(inBuffer, zstdInBuffer); } } } // Write the epilog size_t bytesLeft; do { if (!errorHolder.check( !outBuffer.empty(), "ZSTD_compressBound() was too small")) { return; } bytesLeft = ZSTD_endStream(ctx.get(), &zstdOutBuffer); if (!errorHolder.check( !ZSTD_isError(bytesLeft), ZSTD_getErrorName(bytesLeft))) { return; } out->push(split(outBuffer, zstdOutBuffer)); } while (bytesLeft != 0 && !errorHolder.hasError()); } /** * Calculates how large each independently compressed frame should be. * * @param size The size of the source if known, 0 otherwise * @param numThreads The number of threads available to run compression jobs on * @param params The zstd parameters to be used for compression */ static size_t calculateStep( std::uintmax_t size, size_t numThreads, const ZSTD_parameters ¶ms) { (void)size; (void)numThreads; return size_t{1} << (params.cParams.windowLog + 2); } namespace { enum class FileStatus { Continue, Done, Error }; /// Determines the status of the file descriptor `fd`. 
FileStatus fileStatus(FILE* fd) { if (std::feof(fd)) { return FileStatus::Done; } else if (std::ferror(fd)) { return FileStatus::Error; } return FileStatus::Continue; } } // anonymous namespace /** * Reads `size` data in chunks of `chunkSize` and puts it into `queue`. * Will read less if an error or EOF occurs. * Returns the status of the file after all of the reads have occurred. */ static FileStatus readData(BufferWorkQueue& queue, size_t chunkSize, size_t size, FILE* fd, std::uint64_t *totalBytesRead) { Buffer buffer(size); while (!buffer.empty()) { auto bytesRead = std::fread(buffer.data(), 1, std::min(chunkSize, buffer.size()), fd); *totalBytesRead += bytesRead; queue.push(buffer.splitAt(bytesRead)); auto status = fileStatus(fd); if (status != FileStatus::Continue) { return status; } } return FileStatus::Continue; } std::uint64_t asyncCompressChunks( SharedState& state, WorkQueue>& chunks, ThreadPool& executor, FILE* fd, std::uintmax_t size, size_t numThreads, ZSTD_parameters params) { auto chunksGuard = makeScopeGuard([&] { chunks.finish(); }); std::uint64_t bytesRead = 0; // Break the input up into chunks of size `step` and compress each chunk // independently. size_t step = calculateStep(size, numThreads, params); state.log(DEBUG, "Chosen frame size: %zu\n", step); auto status = FileStatus::Continue; while (status == FileStatus::Continue && !state.errorHolder.hasError()) { // Make a new input queue that we will put the chunk's input data into. auto in = std::make_shared(); auto inGuard = makeScopeGuard([&] { in->finish(); }); // Make a new output queue that compress will put the compressed data into. auto out = std::make_shared(); // Start compression in the thread pool executor.add([&state, in, out, step] { return compress( state, std::move(in), std::move(out), step); }); // Pass the output queue to the writer thread. chunks.push(std::move(out)); state.log(VERBOSE, "%s\n", "Starting a new frame"); // Fill the input queue for the compression job we just started status = readData(*in, ZSTD_CStreamInSize(), step, fd, &bytesRead); } state.errorHolder.check(status != FileStatus::Error, "Error reading input"); return bytesRead; } /** * Decompress a frame, whose data is streamed into `in`, and stream the output * to `out`. * * @param state The shared state * @param in Queue that we `pop()` input buffers from. It contains * exactly one compressed frame. * @param out Queue that we `push()` decompressed output buffers to */ static void decompress( SharedState& state, std::shared_ptr in, std::shared_ptr out) { auto& errorHolder = state.errorHolder; auto guard = makeScopeGuard([&] { out->finish(); }); // Initialize the DCtx auto ctx = state.dStreamPool->get(); if (!errorHolder.check(ctx != nullptr, "Failed to allocate ZSTD_DStream")) { return; } { auto err = ZSTD_resetDStream(ctx.get()); if (!errorHolder.check(!ZSTD_isError(err), ZSTD_getErrorName(err))) { return; } } const size_t outSize = ZSTD_DStreamOutSize(); Buffer inBuffer; size_t returnCode = 0; // Read a buffer in from the input queue while (in->pop(inBuffer) && !errorHolder.hasError()) { auto zstdInBuffer = makeZstdInBuffer(inBuffer); // Decompress the whole buffer and send it to the output queue while (!inBuffer.empty() && !errorHolder.hasError()) { // Allocate a buffer with at least outSize bytes. 
Buffer outBuffer(outSize); auto zstdOutBuffer = makeZstdOutBuffer(outBuffer); // Decompress returnCode = ZSTD_decompressStream(ctx.get(), &zstdOutBuffer, &zstdInBuffer); if (!errorHolder.check( !ZSTD_isError(returnCode), ZSTD_getErrorName(returnCode))) { return; } // Pass the buffer with the decompressed data to the output queue out->push(split(outBuffer, zstdOutBuffer)); // Advance past the input we already read advance(inBuffer, zstdInBuffer); if (returnCode == 0) { // The frame is over, prepare to (maybe) start a new frame ZSTD_initDStream(ctx.get()); } } } if (!errorHolder.check(returnCode <= 1, "Incomplete block")) { return; } // We've given ZSTD_decompressStream all of our data, but there may still // be data to read. while (returnCode == 1) { // Allocate a buffer with at least outSize bytes. Buffer outBuffer(outSize); auto zstdOutBuffer = makeZstdOutBuffer(outBuffer); // Pass in no input. ZSTD_inBuffer zstdInBuffer{nullptr, 0, 0}; // Decompress returnCode = ZSTD_decompressStream(ctx.get(), &zstdOutBuffer, &zstdInBuffer); if (!errorHolder.check( !ZSTD_isError(returnCode), ZSTD_getErrorName(returnCode))) { return; } // Pass the buffer with the decompressed data to the output queue out->push(split(outBuffer, zstdOutBuffer)); } } std::uint64_t asyncDecompressFrames( SharedState& state, WorkQueue>& frames, ThreadPool& executor, FILE* fd) { auto framesGuard = makeScopeGuard([&] { frames.finish(); }); std::uint64_t totalBytesRead = 0; // Split the source up into its component frames. // If we find our recognized skippable frame we know the next frames size // which means that we can decompress each standard frame in independently. // Otherwise, we will decompress using only one decompression task. const size_t chunkSize = ZSTD_DStreamInSize(); auto status = FileStatus::Continue; while (status == FileStatus::Continue && !state.errorHolder.hasError()) { // Make a new input queue that we will put the frames's bytes into. auto in = std::make_shared(); auto inGuard = makeScopeGuard([&] { in->finish(); }); // Make a output queue that decompress will put the decompressed data into auto out = std::make_shared(); size_t frameSize; { // Calculate the size of the next frame. // frameSize is 0 if the frame info can't be decoded. Buffer buffer(SkippableFrame::kSize); auto bytesRead = std::fread(buffer.data(), 1, buffer.size(), fd); totalBytesRead += bytesRead; status = fileStatus(fd); if (bytesRead == 0 && status != FileStatus::Continue) { break; } buffer.subtract(buffer.size() - bytesRead); frameSize = SkippableFrame::tryRead(buffer.range()); in->push(std::move(buffer)); } if (frameSize == 0) { // We hit a non SkippableFrame, so this will be the last job. 
// Make sure that we don't use too much memory in->setMaxSize(64); out->setMaxSize(64); } // Start decompression in the thread pool executor.add([&state, in, out] { return decompress(state, std::move(in), std::move(out)); }); // Pass the output queue to the writer thread frames.push(std::move(out)); if (frameSize == 0) { // We hit a non SkippableFrame ==> not compressed by pzstd or corrupted // Pass the rest of the source to this decompression task state.log(VERBOSE, "%s\n", "Input not in pzstd format, falling back to serial decompression"); while (status == FileStatus::Continue && !state.errorHolder.hasError()) { status = readData(*in, chunkSize, chunkSize, fd, &totalBytesRead); } break; } state.log(VERBOSE, "Decompressing a frame of size %zu", frameSize); // Fill the input queue for the decompression job we just started status = readData(*in, chunkSize, frameSize, fd, &totalBytesRead); } state.errorHolder.check(status != FileStatus::Error, "Error reading input"); return totalBytesRead; } /// Write `data` to `fd`, returns true iff success. static bool writeData(ByteRange data, FILE* fd) { while (!data.empty()) { data.advance(std::fwrite(data.begin(), 1, data.size(), fd)); if (std::ferror(fd)) { return false; } } return true; } std::uint64_t writeFile( SharedState& state, WorkQueue>& outs, FILE* outputFd, bool decompress) { auto& errorHolder = state.errorHolder; auto lineClearGuard = makeScopeGuard([&state] { state.log.clear(INFO); }); std::uint64_t bytesWritten = 0; std::shared_ptr out; // Grab the output queue for each decompression job (in order). while (outs.pop(out)) { if (errorHolder.hasError()) { continue; } if (!decompress) { // If we are compressing and want to write skippable frames we can't // start writing before compression is done because we need to know the // compressed size. // Wait for the compressed size to be available and write skippable frame SkippableFrame frame(out->size()); if (!writeData(frame.data(), outputFd)) { errorHolder.setError("Failed to write output"); return bytesWritten; } bytesWritten += frame.kSize; } // For each chunk of the frame: Pop it from the queue and write it Buffer buffer; while (out->pop(buffer) && !errorHolder.hasError()) { if (!writeData(buffer.range(), outputFd)) { errorHolder.setError("Failed to write output"); return bytesWritten; } bytesWritten += buffer.size(); state.log.update(INFO, "Written: %u MB ", static_cast(bytesWritten >> 20)); } } return bytesWritten; } } Index: vendor/zstd/dist/contrib/pzstd/Pzstd.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/Pzstd.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/Pzstd.h (revision 325597) @@ -1,150 +1,150 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #pragma once #include "ErrorHolder.h" #include "Logging.h" #include "Options.h" #include "utils/Buffer.h" #include "utils/Range.h" #include "utils/ResourcePool.h" #include "utils/ThreadPool.h" #include "utils/WorkQueue.h" #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" #undef ZSTD_STATIC_LINKING_ONLY #include #include #include namespace pzstd { /** * Runs pzstd with `options` and returns the number of bytes written. * An error occurred if `errorHandler.hasError()`. * * @param options The pzstd options to use for (de)compression * @returns 0 upon success and non-zero on failure. */ int pzstdMain(const Options& options); class SharedState { public: SharedState(const Options& options) : log(options.verbosity) { if (!options.decompress) { auto parameters = options.determineParameters(); cStreamPool.reset(new ResourcePool{ [this, parameters]() -> ZSTD_CStream* { this->log(VERBOSE, "%s\n", "Creating new ZSTD_CStream"); auto zcs = ZSTD_createCStream(); if (zcs) { auto err = ZSTD_initCStream_advanced( zcs, nullptr, 0, parameters, 0); if (ZSTD_isError(err)) { ZSTD_freeCStream(zcs); return nullptr; } } return zcs; }, [](ZSTD_CStream *zcs) { ZSTD_freeCStream(zcs); }}); } else { dStreamPool.reset(new ResourcePool{ [this]() -> ZSTD_DStream* { this->log(VERBOSE, "%s\n", "Creating new ZSTD_DStream"); auto zds = ZSTD_createDStream(); if (zds) { auto err = ZSTD_initDStream(zds); if (ZSTD_isError(err)) { ZSTD_freeDStream(zds); return nullptr; } } return zds; }, [](ZSTD_DStream *zds) { ZSTD_freeDStream(zds); }}); } } ~SharedState() { // The resource pools have references to this, so destroy them first. cStreamPool.reset(); dStreamPool.reset(); } Logger log; ErrorHolder errorHolder; std::unique_ptr> cStreamPool; std::unique_ptr> dStreamPool; }; /** * Streams input from `fd`, breaks input up into chunks, and compresses each * chunk independently. Output of each chunk gets streamed to a queue, and * the output queues get put into `chunks` in order. * * @param state The shared state * @param chunks Each compression jobs output queue gets `pushed()` here * as soon as it is available * @param executor The thread pool to run compression jobs in * @param fd The input file descriptor * @param size The size of the input file if known, 0 otherwise * @param numThreads The number of threads in the thread pool * @param parameters The zstd parameters to use for compression * @returns The number of bytes read from the file */ std::uint64_t asyncCompressChunks( SharedState& state, WorkQueue>& chunks, ThreadPool& executor, FILE* fd, std::uintmax_t size, std::size_t numThreads, ZSTD_parameters parameters); /** * Streams input from `fd`. If pzstd headers are available it breaks the input * up into independent frames. It sends each frame to an independent * decompression job. Output of each frame gets streamed to a queue, and * the output queues get put into `frames` in order. * * @param state The shared state * @param frames Each decompression jobs output queue gets `pushed()` here * as soon as it is available * @param executor The thread pool to run compression jobs in * @param fd The input file descriptor * @returns The number of bytes read from the file */ std::uint64_t asyncDecompressFrames( SharedState& state, WorkQueue>& frames, ThreadPool& executor, FILE* fd); /** * Streams input in from each queue in `outs` in order, and writes the data to * `outputFd`. * * @param state The shared state * @param outs A queue of output queues, one for each * (de)compression job. 
* @param outputFd The file descriptor to write to * @param decompress Are we decompressing? * @returns The number of bytes written */ std::uint64_t writeFile( SharedState& state, WorkQueue>& outs, FILE* outputFd, bool decompress); } Index: vendor/zstd/dist/contrib/pzstd/SkippableFrame.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/SkippableFrame.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/SkippableFrame.cpp (revision 325597) @@ -1,30 +1,30 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #include "SkippableFrame.h" #include "mem.h" #include "utils/Range.h" #include using namespace pzstd; SkippableFrame::SkippableFrame(std::uint32_t size) : frameSize_(size) { MEM_writeLE32(data_.data(), kSkippableFrameMagicNumber); MEM_writeLE32(data_.data() + 4, kFrameContentsSize); MEM_writeLE32(data_.data() + 8, frameSize_); } /* static */ std::size_t SkippableFrame::tryRead(ByteRange bytes) { if (bytes.size() < SkippableFrame::kSize || MEM_readLE32(bytes.begin()) != kSkippableFrameMagicNumber || MEM_readLE32(bytes.begin() + 4) != kFrameContentsSize) { return 0; } return MEM_readLE32(bytes.begin() + 8); } Index: vendor/zstd/dist/contrib/pzstd/SkippableFrame.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/SkippableFrame.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/SkippableFrame.h (revision 325597) @@ -1,64 +1,64 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include "utils/Range.h" #include #include #include #include namespace pzstd { /** * We put a skippable frame before each frame. * It contains a skippable frame magic number, the size of the skippable frame, * and the size of the next frame. * Each skippable frame is exactly 12 bytes in little endian format. * The first 8 bytes are for compatibility with the ZSTD format. * If we have N threads, the output will look like * * [0x184D2A50|4|size1] [frame1 of size size1] * [0x184D2A50|4|size2] [frame2 of size size2] * ... * [0x184D2A50|4|sizeN] [frameN of size sizeN] * * Each sizeX is 4 bytes. * * These skippable frames should allow us to skip through the compressed file * and only load at most N pages. 
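 *
 * For example, if the next frame is 65536 (0x00010000) bytes long, the
 * 12-byte skippable frame written before it is, byte by byte in
 * little-endian order:
 *
 *   50 2A 4D 18   (magic number 0x184D2A50)
 *   04 00 00 00   (skippable frame content size: 4)
 *   00 00 01 00   (size of the next frame: 65536)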
 */
class SkippableFrame {
 public:
  static constexpr std::size_t kSize = 12;

 private:
  std::uint32_t frameSize_;
  std::array<std::uint8_t, kSize> data_;
  static constexpr std::uint32_t kSkippableFrameMagicNumber = 0x184D2A50;
  // Could be improved if the size fits in fewer bytes
  static constexpr std::uint32_t kFrameContentsSize = kSize - 8;

 public:
  // Write the skippable frame to data_ in LE format.
  explicit SkippableFrame(std::uint32_t size);

  // Read the skippable frame from bytes in LE format.
  static std::size_t tryRead(ByteRange bytes);

  ByteRange data() const {
    return {data_.data(), data_.size()};
  }

  // Size of the next frame.
  std::size_t frameSize() const {
    return frameSize_;
  }
};
}

Index: vendor/zstd/dist/contrib/pzstd/main.cpp
===================================================================
--- vendor/zstd/dist/contrib/pzstd/main.cpp (revision 325596)
+++ vendor/zstd/dist/contrib/pzstd/main.cpp (revision 325597)
@@ -1,27 +1,27 @@
-/**
+/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 */

#include "ErrorHolder.h"
#include "Options.h"
#include "Pzstd.h"

using namespace pzstd;

int main(int argc, const char** argv) {
  Options options;
  switch (options.parse(argc, argv)) {
  case Options::Status::Failure:
    return 1;
  case Options::Status::Message:
    return 0;
  default:
    break;
  }
  return pzstdMain(options);
}

Index: vendor/zstd/dist/contrib/pzstd/test/OptionsTest.cpp
===================================================================
--- vendor/zstd/dist/contrib/pzstd/test/OptionsTest.cpp (revision 325596)
+++ vendor/zstd/dist/contrib/pzstd/test/OptionsTest.cpp (revision 325597)
@@ -1,536 +1,536 @@
-/**
+/*
 * Copyright (c) 2016-present, Facebook, Inc.
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
*/ #include "Options.h" #include #include using namespace pzstd; namespace pzstd { bool operator==(const Options &lhs, const Options &rhs) { return lhs.numThreads == rhs.numThreads && lhs.maxWindowLog == rhs.maxWindowLog && lhs.compressionLevel == rhs.compressionLevel && lhs.decompress == rhs.decompress && lhs.inputFiles == rhs.inputFiles && lhs.outputFile == rhs.outputFile && lhs.overwrite == rhs.overwrite && lhs.keepSource == rhs.keepSource && lhs.writeMode == rhs.writeMode && lhs.checksum == rhs.checksum && lhs.verbosity == rhs.verbosity; } std::ostream &operator<<(std::ostream &out, const Options &opt) { out << "{"; { out << "\n\t" << "numThreads: " << opt.numThreads; out << ",\n\t" << "maxWindowLog: " << opt.maxWindowLog; out << ",\n\t" << "compressionLevel: " << opt.compressionLevel; out << ",\n\t" << "decompress: " << opt.decompress; out << ",\n\t" << "inputFiles: {"; { bool first = true; for (const auto &file : opt.inputFiles) { if (!first) { out << ","; } first = false; out << "\n\t\t" << file; } } out << "\n\t}"; out << ",\n\t" << "outputFile: " << opt.outputFile; out << ",\n\t" << "overwrite: " << opt.overwrite; out << ",\n\t" << "keepSource: " << opt.keepSource; out << ",\n\t" << "writeMode: " << static_cast(opt.writeMode); out << ",\n\t" << "checksum: " << opt.checksum; out << ",\n\t" << "verbosity: " << opt.verbosity; } out << "\n}"; return out; } } namespace { #ifdef _WIN32 const char nullOutput[] = "nul"; #else const char nullOutput[] = "/dev/null"; #endif constexpr auto autoMode = Options::WriteMode::Auto; } // anonymous namespace #define EXPECT_SUCCESS(...) EXPECT_EQ(Options::Status::Success, __VA_ARGS__) #define EXPECT_FAILURE(...) EXPECT_EQ(Options::Status::Failure, __VA_ARGS__) #define EXPECT_MESSAGE(...) EXPECT_EQ(Options::Status::Message, __VA_ARGS__) template std::array makeArray(Args... 
args) { return {{nullptr, args...}}; } TEST(Options, ValidInputs) { { Options options; auto args = makeArray("--processes", "5", "-o", "x", "y", "-f"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {5, 23, 3, false, {"y"}, "x", true, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("-p", "1", "input", "-19"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {1, 23, 19, false, {"input"}, "", false, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("--ultra", "-22", "-p", "1", "-o", "x", "-d", "x.zst", "-f"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {1, 0, 22, true, {"x.zst"}, "x", true, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("--processes", "100", "hello.zst", "--decompress", "--force"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {100, 23, 3, true, {"hello.zst"}, "", true, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("x", "-dp", "1", "-c"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {1, 23, 3, true, {"x"}, "-", false, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("x", "-dp", "1", "--stdout"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {1, 23, 3, true, {"x"}, "-", false, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("-p", "1", "x", "-5", "-fo", "-", "--ultra", "-d"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {1, 0, 5, true, {"x"}, "-", true, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("silesia.tar", "-o", "silesia.tar.pzstd", "-p", "2"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {2, 23, 3, false, {"silesia.tar"}, "silesia.tar.pzstd", false, true, autoMode, true, 2}; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("x", "-p", "1"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-p", "1"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); } } TEST(Options, GetOutputFile) { { Options options; auto args = makeArray("x"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ("x.zst", options.getOutputFile(options.inputFiles[0])); } { Options options; auto args = makeArray("x", "y", "-o", nullOutput); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(nullOutput, options.getOutputFile(options.inputFiles[0])); } { Options options; auto args = makeArray("x.zst", "-do", nullOutput); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(nullOutput, options.getOutputFile(options.inputFiles[0])); } { Options options; auto args = makeArray("x.zst", "-d"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ("x", options.getOutputFile(options.inputFiles[0])); } { Options options; auto args = makeArray("xzst", "-d"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ("", options.getOutputFile(options.inputFiles[0])); } { Options options; auto args = makeArray("xzst", "-doxx"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ("xx", options.getOutputFile(options.inputFiles[0])); } } TEST(Options, MultipleFiles) 
{ { Options options; auto args = makeArray("x", "y", "z"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected; expected.inputFiles = {"x", "y", "z"}; expected.verbosity = 1; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("x", "y", "z", "-o", nullOutput); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected; expected.inputFiles = {"x", "y", "z"}; expected.outputFile = nullOutput; expected.verbosity = 1; EXPECT_EQ(expected, options); } { Options options; auto args = makeArray("x", "y", "-o-"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "y", "-o", "file"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("-qqvd12qp4", "-f", "x", "--", "--rm", "-c"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); Options expected = {4, 23, 12, true, {"x", "--rm", "-c"}, "", true, true, autoMode, true, 0}; EXPECT_EQ(expected, options); } } TEST(Options, NumThreads) { { Options options; auto args = makeArray("x", "-dfo", "-"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-p", "0", "-fo", "-"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("-f", "-p", "-o", "-"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, BadCompressionLevel) { { Options options; auto args = makeArray("x", "-20"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "--ultra", "-23"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "--1"); // negative 1? EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, InvalidOption) { { Options options; auto args = makeArray("x", "-x"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, BadOutputFile) { { Options options; auto args = makeArray("notzst", "-d", "-p", "1"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ("", options.getOutputFile(options.inputFiles.front())); } } TEST(Options, BadOptionsWithArguments) { { Options options; auto args = makeArray("x", "-pf"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-p", "10f"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-p"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-o"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("x", "-o"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, KeepSource) { { Options options; auto args = makeArray("x", "--rm", "-k"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.keepSource); } { Options options; auto args = makeArray("x", "--rm", "--keep"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.keepSource); } { Options options; auto args = makeArray("x"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.keepSource); } { Options options; auto args = makeArray("x", "--rm"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(false, options.keepSource); } } TEST(Options, Verbosity) { { Options options; auto args = makeArray("x"); 
EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(2, options.verbosity); } { Options options; auto args = makeArray("--quiet", "-qq", "x"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(-1, options.verbosity); } { Options options; auto args = makeArray("x", "y"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(1, options.verbosity); } { Options options; auto args = makeArray("--", "x", "y"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(1, options.verbosity); } { Options options; auto args = makeArray("-qv", "x", "y"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(1, options.verbosity); } { Options options; auto args = makeArray("-v", "x", "y"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(3, options.verbosity); } { Options options; auto args = makeArray("-v", "x"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(3, options.verbosity); } } TEST(Options, TestMode) { { Options options; auto args = makeArray("x", "-t"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.keepSource); EXPECT_EQ(true, options.decompress); EXPECT_EQ(nullOutput, options.outputFile); } { Options options; auto args = makeArray("x", "--test", "--rm", "-ohello"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.keepSource); EXPECT_EQ(true, options.decompress); EXPECT_EQ(nullOutput, options.outputFile); } } TEST(Options, Checksum) { { Options options; auto args = makeArray("x.zst", "--no-check", "-Cd"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.checksum); } { Options options; auto args = makeArray("x"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.checksum); } { Options options; auto args = makeArray("x", "--no-check", "--check"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(true, options.checksum); } { Options options; auto args = makeArray("x", "--no-check"); EXPECT_SUCCESS(options.parse(args.size(), args.data())); EXPECT_EQ(false, options.checksum); } } TEST(Options, InputFiles) { { Options options; auto args = makeArray("-cd"); options.parse(args.size(), args.data()); EXPECT_EQ(1, options.inputFiles.size()); EXPECT_EQ("-", options.inputFiles[0]); EXPECT_EQ("-", options.outputFile); } { Options options; auto args = makeArray(); options.parse(args.size(), args.data()); EXPECT_EQ(1, options.inputFiles.size()); EXPECT_EQ("-", options.inputFiles[0]); EXPECT_EQ("-", options.outputFile); } { Options options; auto args = makeArray("-d"); options.parse(args.size(), args.data()); EXPECT_EQ(1, options.inputFiles.size()); EXPECT_EQ("-", options.inputFiles[0]); EXPECT_EQ("-", options.outputFile); } { Options options; auto args = makeArray("x", "-"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, InvalidOptions) { { Options options; auto args = makeArray("-ibasdf"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("- "); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("-n15"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("-0", "x"); EXPECT_FAILURE(options.parse(args.size(), args.data())); } } TEST(Options, Extras) { { Options options; auto args = makeArray("-h"); EXPECT_MESSAGE(options.parse(args.size(), args.data())); } { Options options; auto args = 
makeArray("-H"); EXPECT_MESSAGE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("-V"); EXPECT_MESSAGE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("--help"); EXPECT_MESSAGE(options.parse(args.size(), args.data())); } { Options options; auto args = makeArray("--version"); EXPECT_MESSAGE(options.parse(args.size(), args.data())); } } Index: vendor/zstd/dist/contrib/pzstd/test/PzstdTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/test/PzstdTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/test/PzstdTest.cpp (revision 325597) @@ -1,149 +1,149 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #include "Pzstd.h" extern "C" { #include "datagen.h" } #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" #include #include #include #include #include using namespace std; using namespace pzstd; TEST(Pzstd, SmallSizes) { unsigned seed = std::random_device{}(); std::fprintf(stderr, "Pzstd.SmallSizes seed: %u\n", seed); std::mt19937 gen(seed); for (unsigned len = 1; len < 256; ++len) { if (len % 16 == 0) { std::fprintf(stderr, "%u / 16\n", len / 16); } std::string inputFile = std::tmpnam(nullptr); auto guard = makeScopeGuard([&] { std::remove(inputFile.c_str()); }); { static uint8_t buf[256]; RDG_genBuffer(buf, len, 0.5, 0.0, gen()); auto fd = std::fopen(inputFile.c_str(), "wb"); auto written = std::fwrite(buf, 1, len, fd); std::fclose(fd); ASSERT_EQ(written, len); } for (unsigned numThreads = 1; numThreads <= 2; ++numThreads) { for (unsigned level = 1; level <= 4; level *= 4) { auto errorGuard = makeScopeGuard([&] { std::fprintf(stderr, "# threads: %u\n", numThreads); std::fprintf(stderr, "compression level: %u\n", level); }); Options options; options.overwrite = true; options.inputFiles = {inputFile}; options.numThreads = numThreads; options.compressionLevel = level; options.verbosity = 1; ASSERT_TRUE(roundTrip(options)); errorGuard.dismiss(); } } } } TEST(Pzstd, LargeSizes) { unsigned seed = std::random_device{}(); std::fprintf(stderr, "Pzstd.LargeSizes seed: %u\n", seed); std::mt19937 gen(seed); for (unsigned len = 1 << 20; len <= (1 << 24); len *= 2) { std::string inputFile = std::tmpnam(nullptr); auto guard = makeScopeGuard([&] { std::remove(inputFile.c_str()); }); { std::unique_ptr buf(new uint8_t[len]); RDG_genBuffer(buf.get(), len, 0.5, 0.0, gen()); auto fd = std::fopen(inputFile.c_str(), "wb"); auto written = std::fwrite(buf.get(), 1, len, fd); std::fclose(fd); ASSERT_EQ(written, len); } for (unsigned numThreads = 1; numThreads <= 16; numThreads *= 4) { for (unsigned level = 1; level <= 4; level *= 4) { auto errorGuard = makeScopeGuard([&] { std::fprintf(stderr, "# threads: %u\n", numThreads); std::fprintf(stderr, "compression level: %u\n", level); }); Options options; options.overwrite = true; options.inputFiles = {inputFile}; options.numThreads = std::min(numThreads, options.numThreads); options.compressionLevel = level; options.verbosity = 1; 
ASSERT_TRUE(roundTrip(options)); errorGuard.dismiss(); } } } } TEST(Pzstd, DISABLED_ExtremelyLargeSize) { unsigned seed = std::random_device{}(); std::fprintf(stderr, "Pzstd.ExtremelyLargeSize seed: %u\n", seed); std::mt19937 gen(seed); std::string inputFile = std::tmpnam(nullptr); auto guard = makeScopeGuard([&] { std::remove(inputFile.c_str()); }); { // Write 4GB + 64 MB constexpr size_t kLength = 1 << 26; std::unique_ptr buf(new uint8_t[kLength]); auto fd = std::fopen(inputFile.c_str(), "wb"); auto closeGuard = makeScopeGuard([&] { std::fclose(fd); }); for (size_t i = 0; i < (1 << 6) + 1; ++i) { RDG_genBuffer(buf.get(), kLength, 0.5, 0.0, gen()); auto written = std::fwrite(buf.get(), 1, kLength, fd); if (written != kLength) { std::fprintf(stderr, "Failed to write file, skipping test\n"); return; } } } Options options; options.overwrite = true; options.inputFiles = {inputFile}; options.compressionLevel = 1; if (options.numThreads == 0) { options.numThreads = 1; } ASSERT_TRUE(roundTrip(options)); } TEST(Pzstd, ExtremelyCompressible) { std::string inputFile = std::tmpnam(nullptr); auto guard = makeScopeGuard([&] { std::remove(inputFile.c_str()); }); { std::unique_ptr buf(new uint8_t[10000]); std::memset(buf.get(), 'a', 10000); auto fd = std::fopen(inputFile.c_str(), "wb"); auto written = std::fwrite(buf.get(), 1, 10000, fd); std::fclose(fd); ASSERT_EQ(written, 10000); } Options options; options.overwrite = true; options.inputFiles = {inputFile}; options.numThreads = 1; options.compressionLevel = 1; ASSERT_TRUE(roundTrip(options)); } Index: vendor/zstd/dist/contrib/pzstd/test/RoundTrip.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/test/RoundTrip.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/test/RoundTrip.h (revision 325597) @@ -1,86 +1,86 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #pragma once #include "Options.h" #include "Pzstd.h" #include "utils/ScopeGuard.h" #include #include #include #include namespace pzstd { inline bool check(std::string source, std::string decompressed) { std::unique_ptr sBuf(new std::uint8_t[1024]); std::unique_ptr dBuf(new std::uint8_t[1024]); auto sFd = std::fopen(source.c_str(), "rb"); auto dFd = std::fopen(decompressed.c_str(), "rb"); auto guard = makeScopeGuard([&] { std::fclose(sFd); std::fclose(dFd); }); size_t sRead, dRead; do { sRead = std::fread(sBuf.get(), 1, 1024, sFd); dRead = std::fread(dBuf.get(), 1, 1024, dFd); if (std::ferror(sFd) || std::ferror(dFd)) { return false; } if (sRead != dRead) { return false; } for (size_t i = 0; i < sRead; ++i) { if (sBuf.get()[i] != dBuf.get()[i]) { return false; } } } while (sRead == 1024); if (!std::feof(sFd) || !std::feof(dFd)) { return false; } return true; } inline bool roundTrip(Options& options) { if (options.inputFiles.size() != 1) { return false; } std::string source = options.inputFiles.front(); std::string compressedFile = std::tmpnam(nullptr); std::string decompressedFile = std::tmpnam(nullptr); auto guard = makeScopeGuard([&] { std::remove(compressedFile.c_str()); std::remove(decompressedFile.c_str()); }); { options.outputFile = compressedFile; options.decompress = false; if (pzstdMain(options) != 0) { return false; } } { options.decompress = true; options.inputFiles.front() = compressedFile; options.outputFile = decompressedFile; if (pzstdMain(options) != 0) { return false; } } return check(source, decompressedFile); } } Index: vendor/zstd/dist/contrib/pzstd/test/RoundTripTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/test/RoundTripTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/test/RoundTripTest.cpp (revision 325597) @@ -1,86 +1,86 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ extern "C" { #include "datagen.h" } #include "Options.h" #include "test/RoundTrip.h" #include "utils/ScopeGuard.h" #include #include #include #include #include using namespace std; using namespace pzstd; namespace { string writeData(size_t size, double matchProba, double litProba, unsigned seed) { std::unique_ptr buf(new uint8_t[size]); RDG_genBuffer(buf.get(), size, matchProba, litProba, seed); string file = tmpnam(nullptr); auto fd = std::fopen(file.c_str(), "wb"); auto guard = makeScopeGuard([&] { std::fclose(fd); }); auto bytesWritten = std::fwrite(buf.get(), 1, size, fd); if (bytesWritten != size) { std::abort(); } return file; } template string generateInputFile(Generator& gen) { // Use inputs ranging from 1 Byte to 2^16 Bytes std::uniform_int_distribution size{1, 1 << 16}; std::uniform_real_distribution<> prob{0, 1}; return writeData(size(gen), prob(gen), prob(gen), gen()); } template Options generateOptions(Generator& gen, const string& inputFile) { Options options; options.inputFiles = {inputFile}; options.overwrite = true; std::uniform_int_distribution numThreads{1, 32}; std::uniform_int_distribution compressionLevel{1, 10}; options.numThreads = numThreads(gen); options.compressionLevel = compressionLevel(gen); return options; } } int main() { std::mt19937 gen(std::random_device{}()); auto newlineGuard = makeScopeGuard([] { std::fprintf(stderr, "\n"); }); for (unsigned i = 0; i < 10000; ++i) { if (i % 100 == 0) { std::fprintf(stderr, "Progress: %u%%\r", i / 100); } auto inputFile = generateInputFile(gen); auto inputGuard = makeScopeGuard([&] { std::remove(inputFile.c_str()); }); for (unsigned i = 0; i < 10; ++i) { auto options = generateOptions(gen, inputFile); if (!roundTrip(options)) { std::fprintf(stderr, "numThreads: %u\n", options.numThreads); std::fprintf(stderr, "level: %u\n", options.compressionLevel); std::fprintf(stderr, "decompress? %u\n", (unsigned)options.decompress); std::fprintf(stderr, "file: %s\n", inputFile.c_str()); return 1; } } } return 0; } Index: vendor/zstd/dist/contrib/pzstd/utils/Buffer.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/Buffer.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/Buffer.h (revision 325597) @@ -1,99 +1,99 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include "utils/Range.h" #include #include #include namespace pzstd { /** * A `Buffer` has a pointer to a shared buffer, and a range of the buffer that * it owns. * The idea is that you can allocate one buffer, and write chunks into it * and break off those chunks. * The underlying buffer is reference counted, and will be destroyed when all * `Buffer`s that reference it are destroyed. */ class Buffer { std::shared_ptr buffer_; MutableByteRange range_; static void delete_buffer(unsigned char* buffer) { delete[] buffer; } public: /// Construct an empty buffer that owns no data. explicit Buffer() {} /// Construct a `Buffer` that owns a new underlying buffer of size `size`. 
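  // A usage sketch of the chunking idea described above (names are
  // illustrative; `fillWithData` stands in for any producer, e.g. a
  // ZSTD_outBuffer pointing into the buffer):
  //
  //   Buffer buffer(4096);               // one underlying allocation
  //   std::size_t n = fillWithData(buffer.data(), buffer.size());
  //   Buffer chunk = buffer.splitAt(n);  // `chunk` owns [0, n)
  //   // `buffer` now points at [n, 4096) of the same allocation, which is
  //   // freed only after both pieces are destroyed.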
explicit Buffer(std::size_t size) : buffer_(new unsigned char[size], delete_buffer), range_(buffer_.get(), buffer_.get() + size) {} explicit Buffer(std::shared_ptr buffer, MutableByteRange data) : buffer_(buffer), range_(data) {} Buffer(Buffer&&) = default; Buffer& operator=(Buffer&&) & = default; /** * Splits the data into two pieces: [begin, begin + n), [begin + n, end). * Their data both points into the same underlying buffer. * Modifies the original `Buffer` to point to only [begin + n, end). * * @param n The offset to split at. * @returns A buffer that owns the data [begin, begin + n). */ Buffer splitAt(std::size_t n) { auto firstPiece = range_.subpiece(0, n); range_.advance(n); return Buffer(buffer_, firstPiece); } /// Modifies the buffer to point to the range [begin + n, end). void advance(std::size_t n) { range_.advance(n); } /// Modifies the buffer to point to the range [begin, end - n). void subtract(std::size_t n) { range_.subtract(n); } /// Returns a read only `Range` pointing to the `Buffer`s data. ByteRange range() const { return range_; } /// Returns a mutable `Range` pointing to the `Buffer`s data. MutableByteRange range() { return range_; } const unsigned char* data() const { return range_.data(); } unsigned char* data() { return range_.data(); } std::size_t size() const { return range_.size(); } bool empty() const { return range_.empty(); } }; } Index: vendor/zstd/dist/contrib/pzstd/utils/FileSystem.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/FileSystem.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/FileSystem.h (revision 325597) @@ -1,94 +1,94 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include "utils/Range.h" #include #include #include #include // A small subset of `std::filesystem`. // `std::filesystem` should be a drop in replacement. // See http://en.cppreference.com/w/cpp/filesystem for documentation. namespace pzstd { // using file_status = ... 
causes gcc to emit a false positive warning #if defined(_MSC_VER) typedef struct ::_stat64 file_status; #else typedef struct ::stat file_status; #endif /// http://en.cppreference.com/w/cpp/filesystem/status inline file_status status(StringPiece path, std::error_code& ec) noexcept { file_status status; #if defined(_MSC_VER) const auto error = ::_stat64(path.data(), &status); #else const auto error = ::stat(path.data(), &status); #endif if (error) { ec.assign(errno, std::generic_category()); } else { ec.clear(); } return status; } /// http://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(file_status status) noexcept { #if defined(S_ISREG) return S_ISREG(status.st_mode); #elif !defined(S_ISREG) && defined(S_IFMT) && defined(S_IFREG) return (status.st_mode & S_IFMT) == S_IFREG; #else static_assert(false, "No POSIX stat() support."); #endif } /// http://en.cppreference.com/w/cpp/filesystem/is_regular_file inline bool is_regular_file(StringPiece path, std::error_code& ec) noexcept { return is_regular_file(status(path, ec)); } /// http://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(file_status status) noexcept { #if defined(S_ISDIR) return S_ISDIR(status.st_mode); #elif !defined(S_ISDIR) && defined(S_IFMT) && defined(S_IFDIR) return (status.st_mode & S_IFMT) == S_IFDIR; #else static_assert(false, "NO POSIX stat() support."); #endif } /// http://en.cppreference.com/w/cpp/filesystem/is_directory inline bool is_directory(StringPiece path, std::error_code& ec) noexcept { return is_directory(status(path, ec)); } /// http://en.cppreference.com/w/cpp/filesystem/file_size inline std::uintmax_t file_size( StringPiece path, std::error_code& ec) noexcept { auto stat = status(path, ec); if (ec) { return -1; } if (!is_regular_file(stat)) { ec.assign(ENOTSUP, std::generic_category()); return -1; } ec.clear(); return stat.st_size; } } Index: vendor/zstd/dist/contrib/pzstd/utils/Likely.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/Likely.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/Likely.h (revision 325597) @@ -1,28 +1,28 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ /** * Compiler hints to indicate the fast path of an "if" branch: whether * the if condition is likely to be true or false. * * @author Tudor Bosman (tudorb@fb.com) */ #pragma once #undef LIKELY #undef UNLIKELY #if defined(__GNUC__) && __GNUC__ >= 4 #define LIKELY(x) (__builtin_expect((x), 1)) #define UNLIKELY(x) (__builtin_expect((x), 0)) #else #define LIKELY(x) (x) #define UNLIKELY(x) (x) #endif Index: vendor/zstd/dist/contrib/pzstd/utils/Range.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/Range.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/Range.h (revision 325597) @@ -1,131 +1,131 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. 
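A brief usage sketch of the FileSystem.h wrappers above, used the same way as
the corresponding std::filesystem free functions (the path here is
hypothetical):

    std::error_code ec;
    auto bytes = pzstd::file_size("input.txt", ec);
    if (ec) {
      // stat() failed, or the path is not a regular file (ENOTSUP).
    }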
* - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ /** * A subset of `folly/Range.h`. * All code copied verbatiam modulo formatting */ #pragma once #include "utils/Likely.h" #include #include #include #include #include namespace pzstd { namespace detail { /* *Use IsCharPointer::type to enable const char* or char*. *Use IsCharPointer::const_type to enable only const char*. */ template struct IsCharPointer {}; template <> struct IsCharPointer { typedef int type; }; template <> struct IsCharPointer { typedef int const_type; typedef int type; }; } // namespace detail template class Range { Iter b_; Iter e_; public: using size_type = std::size_t; using iterator = Iter; using const_iterator = Iter; using value_type = typename std::remove_reference< typename std::iterator_traits::reference>::type; using reference = typename std::iterator_traits::reference; constexpr Range() : b_(), e_() {} constexpr Range(Iter begin, Iter end) : b_(begin), e_(end) {} constexpr Range(Iter begin, size_type size) : b_(begin), e_(begin + size) {} template ::type = 0> /* implicit */ Range(Iter str) : b_(str), e_(str + std::strlen(str)) {} template ::const_type = 0> /* implicit */ Range(const std::string& str) : b_(str.data()), e_(b_ + str.size()) {} // Allow implicit conversion from Range to Range if From is // implicitly convertible to To. template < class OtherIter, typename std::enable_if< (!std::is_same::value && std::is_convertible::value), int>::type = 0> constexpr /* implicit */ Range(const Range& other) : b_(other.begin()), e_(other.end()) {} Range(const Range&) = default; Range(Range&&) = default; Range& operator=(const Range&) & = default; Range& operator=(Range&&) & = default; constexpr size_type size() const { return e_ - b_; } bool empty() const { return b_ == e_; } Iter data() const { return b_; } Iter begin() const { return b_; } Iter end() const { return e_; } void advance(size_type n) { if (UNLIKELY(n > size())) { throw std::out_of_range("index out of range"); } b_ += n; } void subtract(size_type n) { if (UNLIKELY(n > size())) { throw std::out_of_range("index out of range"); } e_ -= n; } Range subpiece(size_type first, size_type length = std::string::npos) const { if (UNLIKELY(first > size())) { throw std::out_of_range("index out of range"); } return Range(b_ + first, std::min(length, size() - first)); } }; using ByteRange = Range; using MutableByteRange = Range; using StringPiece = Range; } Index: vendor/zstd/dist/contrib/pzstd/utils/ResourcePool.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/ResourcePool.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/ResourcePool.h (revision 325597) @@ -1,96 +1,96 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
+ * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include #include #include #include #include namespace pzstd { /** * An unbounded pool of resources. * A `ResourcePool` requires a factory function that takes allocates `T*` and * a free function that frees a `T*`. * Calling `ResourcePool::get()` will give you a new `ResourcePool::UniquePtr` * to a `T`, and when it goes out of scope the resource will be returned to the * pool. * The `ResourcePool` *must* survive longer than any resources it hands out. * Remember that `ResourcePool` hands out mutable `T`s, so make sure to clean * up the resource before or after every use. */ template class ResourcePool { public: class Deleter; using Factory = std::function; using Free = std::function; using UniquePtr = std::unique_ptr; private: std::mutex mutex_; Factory factory_; Free free_; std::vector resources_; unsigned inUse_; public: /** * Creates a `ResourcePool`. * * @param factory The function to use to create new resources. * @param free The function to use to free resources created by `factory`. */ ResourcePool(Factory factory, Free free) : factory_(std::move(factory)), free_(std::move(free)), inUse_(0) {} /** * @returns A unique pointer to a resource. The resource is null iff * there are no avaiable resources and `factory()` returns null. */ UniquePtr get() { std::lock_guard lock(mutex_); if (!resources_.empty()) { UniquePtr resource{resources_.back(), Deleter{*this}}; resources_.pop_back(); ++inUse_; return resource; } UniquePtr resource{factory_(), Deleter{*this}}; ++inUse_; return resource; } ~ResourcePool() noexcept { assert(inUse_ == 0); for (const auto resource : resources_) { free_(resource); } } class Deleter { ResourcePool *pool_; public: explicit Deleter(ResourcePool &pool) : pool_(&pool) {} void operator() (T *resource) { std::lock_guard lock(pool_->mutex_); // Make sure we don't put null resources into the pool if (resource) { pool_->resources_.push_back(resource); } assert(pool_->inUse_ > 0); --pool_->inUse_; } }; }; } Index: vendor/zstd/dist/contrib/pzstd/utils/ScopeGuard.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/ScopeGuard.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/ScopeGuard.h (revision 325597) @@ -1,50 +1,50 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include namespace pzstd { /** * Dismissable scope guard. * `Function` must be callable and take no parameters. * Unless `dissmiss()` is called, the callable is executed upon destruction of * `ScopeGuard`. 
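A minimal usage sketch of the `ResourcePool` above (an `int` resource stands
in for the ZSTD_CStream/ZSTD_DStream pools that SharedState builds in
Pzstd.h):

    ResourcePool<int> pool(
        []() { return new int{0}; },   // factory
        [](int* p) { delete p; });     // free
    {
      auto handle = pool.get();        // ResourcePool<int>::UniquePtr
      *handle += 1;
    }                                  // handle returns the int to the pool
    // A later pool.get() hands back the same int, still holding 1, which is
    // why the class docs warn to clean up the resource around every use.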
* * Example: * * auto guard = makeScopeGuard([&] { cleanup(); }); */ template class ScopeGuard { Function function; bool dismissed; public: explicit ScopeGuard(Function&& function) : function(std::move(function)), dismissed(false) {} void dismiss() { dismissed = true; } ~ScopeGuard() noexcept { if (!dismissed) { function(); } } }; /// Creates a scope guard from `function`. template ScopeGuard makeScopeGuard(Function&& function) { return ScopeGuard(std::forward(function)); } } Index: vendor/zstd/dist/contrib/pzstd/utils/ThreadPool.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/ThreadPool.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/ThreadPool.h (revision 325597) @@ -1,58 +1,58 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include "utils/WorkQueue.h" #include #include #include #include namespace pzstd { /// A simple thread pool that pulls tasks off its queue in FIFO order. class ThreadPool { std::vector threads_; WorkQueue> tasks_; public: /// Constructs a thread pool with `numThreads` threads. explicit ThreadPool(std::size_t numThreads) { threads_.reserve(numThreads); for (std::size_t i = 0; i < numThreads; ++i) { threads_.emplace_back([this] { std::function task; while (tasks_.pop(task)) { task(); } }); } } /// Finishes all tasks currently in the queue. ~ThreadPool() { tasks_.finish(); for (auto& thread : threads_) { thread.join(); } } /** * Adds `task` to the queue of tasks to execute. Since `task` is a * `std::function<>`, it cannot be a move only type. So any lambda passed must * not capture move only types (like `std::unique_ptr`). * * @param task The task to execute. */ void add(std::function task) { tasks_.push(std::move(task)); } }; } Index: vendor/zstd/dist/contrib/pzstd/utils/WorkQueue.h =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/WorkQueue.h (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/WorkQueue.h (revision 325597) @@ -1,181 +1,181 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #pragma once #include "utils/Buffer.h" #include #include #include #include #include #include #include #include namespace pzstd { /// Unbounded thread-safe work queue. 
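A tiny usage sketch of the `ThreadPool` above (the task bodies are
placeholders, and `doWork` is hypothetical):

    {
      pzstd::ThreadPool executor(4);       // spawns 4 worker threads
      for (int i = 0; i < 8; ++i) {
        executor.add([i] { doWork(i); });
      }
    }   // ~ThreadPool() finishes the queue and joins all workers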
template <typename T>
class WorkQueue {
  // Protects all member variable access
  std::mutex mutex_;
  std::condition_variable readerCv_;
  std::condition_variable writerCv_;
  std::condition_variable finishCv_;
  std::queue<T> queue_;
  bool done_;
  std::size_t maxSize_;

  // Must have lock to call this function
  bool full() const {
    if (maxSize_ == 0) {
      return false;
    }
    return queue_.size() >= maxSize_;
  }

 public:
  /**
   * Constructs an empty work queue with an optional max size.
   * If `maxSize == 0` the queue size is unbounded.
   *
   * @param maxSize The maximum allowed size of the work queue.
   */
  WorkQueue(std::size_t maxSize = 0) : done_(false), maxSize_(maxSize) {}

  /**
   * Push an item onto the work queue. Notify a single thread that work is
   * available. If `finish()` has been called, do nothing and return false.
   * If `push()` returns false, then `item` has not been moved from.
   *
   * @param item Item to push onto the queue.
   * @returns True upon success, false if `finish()` has been called. An
   *          item was pushed iff `push()` returns true.
   */
  bool push(T&& item) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      while (full() && !done_) {
        writerCv_.wait(lock);
      }
      if (done_) {
        return false;
      }
      queue_.push(std::move(item));
    }
    readerCv_.notify_one();
    return true;
  }

  /**
   * Attempts to pop an item off the work queue. It will block until data is
   * available or `finish()` has been called.
   *
   * @param[out] item If `pop` returns `true`, it contains the popped item.
   *                  If `pop` returns `false`, it is unmodified.
   * @returns True upon success. False if the queue is empty and
   *          `finish()` has been called.
   */
  bool pop(T& item) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      while (queue_.empty() && !done_) {
        readerCv_.wait(lock);
      }
      if (queue_.empty()) {
        assert(done_);
        return false;
      }
      item = std::move(queue_.front());
      queue_.pop();
    }
    writerCv_.notify_one();
    return true;
  }

  /**
   * Sets the maximum queue size. If `maxSize == 0` then it is unbounded.
   *
   * @param maxSize The new maximum queue size.
   */
  void setMaxSize(std::size_t maxSize) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      maxSize_ = maxSize;
    }
    writerCv_.notify_all();
  }

  /**
   * Promise that `push()` won't be called again, so once the queue is empty
   * there will never be any more work.
   */
  void finish() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      assert(!done_);
      done_ = true;
    }
    readerCv_.notify_all();
    writerCv_.notify_all();
    finishCv_.notify_all();
  }

  /// Blocks until `finish()` has been called (but the queue may not be empty).
  void waitUntilFinished() {
    std::unique_lock<std::mutex> lock(mutex_);
    while (!done_) {
      finishCv_.wait(lock);
    }
  }
};

/// Work queue for `Buffer`s that knows the total number of bytes in the queue.
class BufferWorkQueue {
  WorkQueue<Buffer> queue_;
  std::atomic<std::size_t> size_;

 public:
  BufferWorkQueue(std::size_t maxSize = 0) : queue_(maxSize), size_(0) {}

  void push(Buffer buffer) {
    size_.fetch_add(buffer.size());
    queue_.push(std::move(buffer));
  }

  bool pop(Buffer& buffer) {
    bool result = queue_.pop(buffer);
    if (result) {
      size_.fetch_sub(buffer.size());
    }
    return result;
  }

  void setMaxSize(std::size_t maxSize) {
    queue_.setMaxSize(maxSize);
  }

  void finish() {
    queue_.finish();
  }

  /**
   * Blocks until `finish()` has been called.
   *
   * @returns The total number of bytes of all the `Buffer`s currently in the
   *          queue.
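   *
   * For example:
   *
   *   BufferWorkQueue queue;
   *   queue.push(Buffer(100));
   *   queue.push(Buffer(50));
   *   queue.finish();
   *   auto bytes = queue.size();   // returns 150 once finish() is called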
*/ std::size_t size() { queue_.waitUntilFinished(); return size_.load(); } }; } Index: vendor/zstd/dist/contrib/pzstd/utils/test/BufferTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/BufferTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/BufferTest.cpp (revision 325597) @@ -1,89 +1,89 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #include "utils/Buffer.h" #include "utils/Range.h" #include #include using namespace pzstd; namespace { void deleter(const unsigned char* buf) { delete[] buf; } } TEST(Buffer, Constructors) { Buffer empty; EXPECT_TRUE(empty.empty()); EXPECT_EQ(0, empty.size()); Buffer sized(5); EXPECT_FALSE(sized.empty()); EXPECT_EQ(5, sized.size()); Buffer moved(std::move(sized)); EXPECT_FALSE(sized.empty()); EXPECT_EQ(5, sized.size()); Buffer assigned; assigned = std::move(moved); EXPECT_FALSE(sized.empty()); EXPECT_EQ(5, sized.size()); } TEST(Buffer, BufferManagement) { std::shared_ptr buf(new unsigned char[10], deleter); { Buffer acquired(buf, MutableByteRange(buf.get(), buf.get() + 10)); EXPECT_EQ(2, buf.use_count()); Buffer moved(std::move(acquired)); EXPECT_EQ(2, buf.use_count()); Buffer assigned; assigned = std::move(moved); EXPECT_EQ(2, buf.use_count()); Buffer split = assigned.splitAt(5); EXPECT_EQ(3, buf.use_count()); split.advance(1); assigned.subtract(1); EXPECT_EQ(3, buf.use_count()); } EXPECT_EQ(1, buf.use_count()); } TEST(Buffer, Modifiers) { Buffer buf(10); { unsigned char i = 0; for (auto& byte : buf.range()) { byte = i++; } } auto prefix = buf.splitAt(2); ASSERT_EQ(2, prefix.size()); EXPECT_EQ(0, *prefix.data()); ASSERT_EQ(8, buf.size()); EXPECT_EQ(2, *buf.data()); buf.advance(2); EXPECT_EQ(4, *buf.data()); EXPECT_EQ(9, *(buf.range().end() - 1)); buf.subtract(2); EXPECT_EQ(7, *(buf.range().end() - 1)); EXPECT_EQ(4, buf.size()); } Index: vendor/zstd/dist/contrib/pzstd/utils/test/RangeTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/RangeTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/RangeTest.cpp (revision 325597) @@ -1,82 +1,82 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #include "utils/Range.h" #include #include using namespace pzstd; // Range is directly copied from folly. // Just some sanity tests to make sure everything seems to work. 
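// In brief, the operations exercised below behave like this (the values in
// the comments follow directly from the definitions in Range.h):
//
//   StringPiece piece("hello world");    // implicit from a C string
//   piece.advance(6);                    // now "world"
//   auto prefix = piece.subpiece(0, 3);  // "wor"; piece is unchanged
//   piece.subtract(1);                   // now "worl"
//   // advance/subtract throw std::out_of_range when n > size().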
TEST(Range, Constructors) { StringPiece empty; EXPECT_TRUE(empty.empty()); EXPECT_EQ(0, empty.size()); std::string str = "hello"; { Range piece(str.begin(), str.end()); EXPECT_EQ(5, piece.size()); EXPECT_EQ('h', *piece.data()); EXPECT_EQ('o', *(piece.end() - 1)); } { StringPiece piece(str.data(), str.size()); EXPECT_EQ(5, piece.size()); EXPECT_EQ('h', *piece.data()); EXPECT_EQ('o', *(piece.end() - 1)); } { StringPiece piece(str); EXPECT_EQ(5, piece.size()); EXPECT_EQ('h', *piece.data()); EXPECT_EQ('o', *(piece.end() - 1)); } { StringPiece piece(str.c_str()); EXPECT_EQ(5, piece.size()); EXPECT_EQ('h', *piece.data()); EXPECT_EQ('o', *(piece.end() - 1)); } } TEST(Range, Modifiers) { StringPiece range("hello world"); ASSERT_EQ(11, range.size()); { auto hello = range.subpiece(0, 5); EXPECT_EQ(5, hello.size()); EXPECT_EQ('h', *hello.data()); EXPECT_EQ('o', *(hello.end() - 1)); } { auto hello = range; hello.subtract(6); EXPECT_EQ(5, hello.size()); EXPECT_EQ('h', *hello.data()); EXPECT_EQ('o', *(hello.end() - 1)); } { auto world = range; world.advance(6); EXPECT_EQ(5, world.size()); EXPECT_EQ('w', *world.data()); EXPECT_EQ('d', *(world.end() - 1)); } std::string expected = "hello world"; EXPECT_EQ(expected, std::string(range.begin(), range.end())); EXPECT_EQ(expected, std::string(range.data(), range.size())); } Index: vendor/zstd/dist/contrib/pzstd/utils/test/ResourcePoolTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/ResourcePoolTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/ResourcePoolTest.cpp (revision 325597) @@ -1,72 +1,72 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #include "utils/ResourcePool.h" #include #include #include using namespace pzstd; TEST(ResourcePool, FullTest) { unsigned numCreated = 0; unsigned numDeleted = 0; { ResourcePool pool( [&numCreated] { ++numCreated; return new int{5}; }, [&numDeleted](int *x) { ++numDeleted; delete x; }); { auto i = pool.get(); EXPECT_EQ(5, *i); *i = 6; } { auto i = pool.get(); EXPECT_EQ(6, *i); auto j = pool.get(); EXPECT_EQ(5, *j); *j = 7; } { auto i = pool.get(); EXPECT_EQ(6, *i); auto j = pool.get(); EXPECT_EQ(7, *j); } } EXPECT_EQ(2, numCreated); EXPECT_EQ(numCreated, numDeleted); } TEST(ResourcePool, ThreadSafe) { std::atomic numCreated{0}; std::atomic numDeleted{0}; { ResourcePool pool( [&numCreated] { ++numCreated; return new int{0}; }, [&numDeleted](int *x) { ++numDeleted; delete x; }); auto push = [&pool] { for (int i = 0; i < 100; ++i) { auto x = pool.get(); ++*x; } }; std::thread t1{push}; std::thread t2{push}; t1.join(); t2.join(); auto x = pool.get(); auto y = pool.get(); EXPECT_EQ(200, *x + *y); } EXPECT_GE(2, numCreated); EXPECT_EQ(numCreated, numDeleted); } Index: vendor/zstd/dist/contrib/pzstd/utils/test/ScopeGuardTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/ScopeGuardTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/ScopeGuardTest.cpp (revision 325597) @@ -1,28 +1,28 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). */ #include "utils/ScopeGuard.h" #include using namespace pzstd; TEST(ScopeGuard, Dismiss) { { auto guard = makeScopeGuard([&] { EXPECT_TRUE(false); }); guard.dismiss(); } } TEST(ScopeGuard, Executes) { bool executed = false; { auto guard = makeScopeGuard([&] { executed = true; }); } EXPECT_TRUE(executed); } Index: vendor/zstd/dist/contrib/pzstd/utils/test/ThreadPoolTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/ThreadPoolTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/ThreadPoolTest.cpp (revision 325597) @@ -1,71 +1,71 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #include "utils/ThreadPool.h" #include #include #include #include #include using namespace pzstd; TEST(ThreadPool, Ordering) { std::vector results; { ThreadPool executor(1); for (int i = 0; i < 10; ++i) { executor.add([ &results, i ] { results.push_back(i); }); } } for (int i = 0; i < 10; ++i) { EXPECT_EQ(i, results[i]); } } TEST(ThreadPool, AllJobsFinished) { std::atomic numFinished{0}; std::atomic start{false}; { std::cerr << "Creating executor" << std::endl; ThreadPool executor(5); for (int i = 0; i < 10; ++i) { executor.add([ &numFinished, &start ] { while (!start.load()) { std::this_thread::yield(); } ++numFinished; }); } std::cerr << "Starting" << std::endl; start.store(true); std::cerr << "Finishing" << std::endl; } EXPECT_EQ(10, numFinished.load()); } TEST(ThreadPool, AddJobWhileJoining) { std::atomic done{false}; { ThreadPool executor(1); executor.add([&executor, &done] { while (!done.load()) { std::this_thread::yield(); } // Sleep for a second to be sure that we are joining std::this_thread::sleep_for(std::chrono::seconds(1)); executor.add([] { EXPECT_TRUE(false); }); }); done.store(true); } } Index: vendor/zstd/dist/contrib/pzstd/utils/test/WorkQueueTest.cpp =================================================================== --- vendor/zstd/dist/contrib/pzstd/utils/test/WorkQueueTest.cpp (revision 325596) +++ vendor/zstd/dist/contrib/pzstd/utils/test/WorkQueueTest.cpp (revision 325597) @@ -1,282 +1,282 @@ -/** +/* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
*/ #include "utils/Buffer.h" #include "utils/WorkQueue.h" #include #include #include #include #include #include using namespace pzstd; namespace { struct Popper { WorkQueue* queue; int* results; std::mutex* mutex; void operator()() { int result; while (queue->pop(result)) { std::lock_guard lock(*mutex); results[result] = result; } } }; } TEST(WorkQueue, SingleThreaded) { WorkQueue queue; int result; queue.push(5); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(5, result); queue.push(1); queue.push(2); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(1, result); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(2, result); queue.push(1); queue.push(2); queue.finish(); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(1, result); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(2, result); EXPECT_FALSE(queue.pop(result)); queue.waitUntilFinished(); } TEST(WorkQueue, SPSC) { WorkQueue queue; const int max = 100; for (int i = 0; i < 10; ++i) { queue.push(int{i}); } std::thread thread([ &queue, max ] { int result; for (int i = 0;; ++i) { if (!queue.pop(result)) { EXPECT_EQ(i, max); break; } EXPECT_EQ(i, result); } }); std::this_thread::yield(); for (int i = 10; i < max; ++i) { queue.push(int{i}); } queue.finish(); thread.join(); } TEST(WorkQueue, SPMC) { WorkQueue queue; std::vector results(50, -1); std::mutex mutex; std::vector threads; for (int i = 0; i < 5; ++i) { threads.emplace_back(Popper{&queue, results.data(), &mutex}); } for (int i = 0; i < 50; ++i) { queue.push(int{i}); } queue.finish(); for (auto& thread : threads) { thread.join(); } for (int i = 0; i < 50; ++i) { EXPECT_EQ(i, results[i]); } } TEST(WorkQueue, MPMC) { WorkQueue queue; std::vector results(100, -1); std::mutex mutex; std::vector popperThreads; for (int i = 0; i < 4; ++i) { popperThreads.emplace_back(Popper{&queue, results.data(), &mutex}); } std::vector pusherThreads; for (int i = 0; i < 2; ++i) { auto min = i * 50; auto max = (i + 1) * 50; pusherThreads.emplace_back( [ &queue, min, max ] { for (int i = min; i < max; ++i) { queue.push(int{i}); } }); } for (auto& thread : pusherThreads) { thread.join(); } queue.finish(); for (auto& thread : popperThreads) { thread.join(); } for (int i = 0; i < 100; ++i) { EXPECT_EQ(i, results[i]); } } TEST(WorkQueue, BoundedSizeWorks) { WorkQueue queue(1); int result; queue.push(5); queue.pop(result); queue.push(5); queue.pop(result); queue.push(5); queue.finish(); queue.pop(result); EXPECT_EQ(5, result); } TEST(WorkQueue, BoundedSizePushAfterFinish) { WorkQueue queue(1); int result; queue.push(5); std::thread pusher([&queue] { queue.push(6); }); // Dirtily try and make sure that pusher has run. std::this_thread::sleep_for(std::chrono::seconds(1)); queue.finish(); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(5, result); EXPECT_FALSE(queue.pop(result)); pusher.join(); } TEST(WorkQueue, SetMaxSize) { WorkQueue queue(2); int result; queue.push(5); queue.push(6); queue.setMaxSize(1); std::thread pusher([&queue] { queue.push(7); }); // Dirtily try and make sure that pusher has run. 
std::this_thread::sleep_for(std::chrono::seconds(1)); queue.finish(); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(5, result); EXPECT_TRUE(queue.pop(result)); EXPECT_EQ(6, result); EXPECT_FALSE(queue.pop(result)); pusher.join(); } TEST(WorkQueue, BoundedSizeMPMC) { WorkQueue queue(10); std::vector results(200, -1); std::mutex mutex; std::cerr << "Creating popperThreads" << std::endl; std::vector popperThreads; for (int i = 0; i < 4; ++i) { popperThreads.emplace_back(Popper{&queue, results.data(), &mutex}); } std::cerr << "Creating pusherThreads" << std::endl; std::vector pusherThreads; for (int i = 0; i < 2; ++i) { auto min = i * 100; auto max = (i + 1) * 100; pusherThreads.emplace_back( [ &queue, min, max ] { for (int i = min; i < max; ++i) { queue.push(int{i}); } }); } std::cerr << "Joining pusherThreads" << std::endl; for (auto& thread : pusherThreads) { thread.join(); } std::cerr << "Finishing queue" << std::endl; queue.finish(); std::cerr << "Joining popperThreads" << std::endl; for (auto& thread : popperThreads) { thread.join(); } std::cerr << "Inspecting results" << std::endl; for (int i = 0; i < 200; ++i) { EXPECT_EQ(i, results[i]); } } TEST(WorkQueue, FailedPush) { WorkQueue> queue; std::unique_ptr x(new int{5}); EXPECT_TRUE(queue.push(std::move(x))); EXPECT_EQ(nullptr, x); queue.finish(); x.reset(new int{6}); EXPECT_FALSE(queue.push(std::move(x))); EXPECT_NE(nullptr, x); EXPECT_EQ(6, *x); } TEST(BufferWorkQueue, SizeCalculatedCorrectly) { { BufferWorkQueue queue; queue.finish(); EXPECT_EQ(0, queue.size()); } { BufferWorkQueue queue; queue.push(Buffer(10)); queue.finish(); EXPECT_EQ(10, queue.size()); } { BufferWorkQueue queue; queue.push(Buffer(10)); queue.push(Buffer(5)); queue.finish(); EXPECT_EQ(15, queue.size()); } { BufferWorkQueue queue; queue.push(Buffer(10)); queue.push(Buffer(5)); queue.finish(); Buffer buffer; queue.pop(buffer); EXPECT_EQ(5, queue.size()); } } Index: vendor/zstd/dist/contrib/seekable_format/examples/.gitignore =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/.gitignore (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/.gitignore (revision 325597) @@ -0,0 +1,4 @@ +seekable_compression +seekable_decompression +parallel_processing +parallel_compression Index: vendor/zstd/dist/contrib/seekable_format/examples/Makefile =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/Makefile (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/Makefile (revision 325597) @@ -0,0 +1,42 @@ +# ################################################################ +# Copyright (c) 2017-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). 
+# ################################################################ + +# This Makefile presumes libzstd is built, using `make` in / or /lib/ + +LDFLAGS += ../../../lib/libzstd.a +CPPFLAGS += -I../ -I../../../lib -I../../../lib/common + +CFLAGS ?= -O3 +CFLAGS += -g + +SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c + +.PHONY: default all clean test + +default: all + +all: seekable_compression seekable_decompression parallel_processing + +seekable_compression : seekable_compression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +seekable_decompression : seekable_decompression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +parallel_processing : parallel_processing.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread + +parallel_compression : parallel_compression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread + +clean: + @rm -f core *.o tmp* result* *.zst \ + seekable_compression seekable_decompression \ + parallel_processing parallel_compression + @echo Cleaning completed Property changes on: vendor/zstd/dist/contrib/seekable_format/examples/Makefile ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/examples/parallel_compression.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/parallel_compression.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/parallel_compression.c (revision 325597) @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#include // malloc, free, exit, atoi +#include // fprintf, perror, feof, fopen, etc. 
+#include // strlen, memset, strcat +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include +#if defined(WIN32) || defined(_WIN32) +# include +# define SLEEP(x) Sleep(x) +#else +# include +# define SLEEP(x) usleep(x * 1000) +#endif + +#define XXH_NAMESPACE ZSTD_ +#include "xxhash.h" + +#include "pool.h" // use zstd thread pool for demo + +#include "zstd_seekable.h" + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc:"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) +{ + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + +static long int ftell_orDie(FILE* file) +{ + long int off = ftell(file); + if (off != -1) return off; + /* error */ + perror("ftell"); + exit(8); +} + +struct job { + const void* src; + size_t srcSize; + void* dst; + size_t dstSize; + + unsigned checksum; + + int compressionLevel; + int done; +}; + +static void compressFrame(void* opaque) +{ + struct job* job = opaque; + + job->checksum = XXH64(job->src, job->srcSize, 0); + + size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel); + if (ZSTD_isError(ret)) { + fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); + exit(20); + } + + job->dstSize = ret; + job->done = 1; +} + +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads) +{ + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = fopen_orDie(outName, "wb"); + + if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } + unsigned dstSize = ZSTD_compressBound(frameSize); + + + fseek_orDie(fin, 0, SEEK_END); + long int length = ftell_orDie(fin); + fseek_orDie(fin, 0, SEEK_SET); + + size_t numFrames = (length + frameSize - 1) / frameSize; + + struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames); + + size_t i; + for(i = 0; i < numFrames; i++) { + void* in = malloc_orDie(frameSize); + void* out = malloc_orDie(dstSize); + + size_t inSize = fread_orDie(in, frameSize, fin); + + jobs[i].src = in; + jobs[i].srcSize = inSize; + jobs[i].dst = out; + jobs[i].dstSize = dstSize; + jobs[i].compressionLevel = cLevel; + jobs[i].done = 0; + POOL_add(pool, compressFrame, &jobs[i]); + } + + ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1); + if (fl == 
NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } + for (i = 0; i < numFrames; i++) { + while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ + fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout); + free((void*)jobs[i].src); + free(jobs[i].dst); + + size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum); + if (ZSTD_isError(ret)) { fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); } + } + + { unsigned char seekTableBuff[1024]; + ZSTD_outBuffer out = {seekTableBuff, 1024, 0}; + while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) { + fwrite_orDie(seekTableBuff, out.pos, fout); + out.pos = 0; + } + fwrite_orDie(seekTableBuff, out.pos, fout); + } + + ZSTD_seekable_freeFrameLog(fl); + free(jobs); + fclose_orDie(fout); + fclose_orDie(fin); +} + +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} + +int main(int argc, const char** argv) { + const char* const exeName = argv[0]; + if (argc!=4) { + printf("wrong arguments\n"); + printf("usage:\n"); + printf("%s FILE FRAME_SIZE NB_THREADS\n", exeName); + return 1; + } + + { const char* const inFileName = argv[1]; + unsigned const frameSize = (unsigned)atoi(argv[2]); + int const nbThreads = atoi(argv[3]); + + const char* const outFileName = createOutFilename_orDie(inFileName); + compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads); + } + + return 0; +} Property changes on: vendor/zstd/dist/contrib/seekable_format/examples/parallel_compression.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/examples/parallel_processing.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/parallel_processing.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/parallel_processing.c (revision 325597) @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +/* + * A simple demo that sums up all the bytes in the file in parallel using + * seekable decompression and the zstd thread pool + */ + +#include // malloc, exit +#include // fprintf, perror, feof +#include // strerror +#include // errno +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include +#if defined(WIN32) || defined(_WIN32) +# include +# define SLEEP(x) Sleep(x) +#else +# include +# define SLEEP(x) usleep(x * 1000) +#endif + +#include "pool.h" // use zstd thread pool for demo + +#include "zstd_seekable.h" + +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc"); + exit(1); +} + +static void* realloc_orDie(void* ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr) return ptr; + /* error */ + perror("realloc"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) { + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + +struct sum_job { + const char* fname; + unsigned long long sum; + unsigned frameNb; + int done; +}; + +static void sumFrame(void* opaque) +{ + struct sum_job* job = (struct sum_job*)opaque; + job->done = 0; + + FILE* const fin = fopen_orDie(job->fname, "rb"); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t const frameSize = ZSTD_seekable_getFrameDecompressedSize(seekable, job->frameNb); + unsigned char* data = malloc_orDie(frameSize); + + size_t result = ZSTD_seekable_decompressFrame(seekable, data, frameSize, job->frameNb); + if (ZSTD_isError(result)) { fprintf(stderr, "ZSTD_seekable_decompressFrame() error : %s \n", ZSTD_getErrorName(result)); exit(12); } + + unsigned long long sum = 0; + size_t i; + for (i = 0; i < frameSize; i++) { + sum += data[i]; + } + job->sum = sum; + job->done = 1; + + fclose(fin); + ZSTD_seekable_free(seekable); + free(data); +} + +static void sumFile_orDie(const char* fname, int nbThreads) +{ + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + + FILE* const fin = fopen_orDie(fname, "rb"); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t const numFrames = ZSTD_seekable_getNumFrames(seekable); + struct sum_job* jobs = (struct sum_job*)malloc(numFrames * sizeof(struct sum_job)); + + size_t i; + for (i = 0; i < numFrames; i++) { + jobs[i] = (struct sum_job){ fname, 0, i, 0 }; + POOL_add(pool, sumFrame, &jobs[i]); + } + + unsigned long long total = 0; + + for (i = 0; i < numFrames; 
i++) { + while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ + total += jobs[i].sum; + } + + printf("Sum: %llu\n", total); + + POOL_free(pool); + ZSTD_seekable_free(seekable); + fclose(fin); + free(jobs); +} + + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc!=3) { + fprintf(stderr, "wrong arguments\n"); + fprintf(stderr, "usage:\n"); + fprintf(stderr, "%s FILE NB_THREADS\n", exeName); + return 1; + } + + { + const char* const inFilename = argv[1]; + int const nbThreads = atoi(argv[2]); + sumFile_orDie(inFilename, nbThreads); + } + + return 0; +} Property changes on: vendor/zstd/dist/contrib/seekable_format/examples/parallel_processing.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/examples/seekable_compression.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/seekable_compression.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/seekable_compression.c (revision 325597) @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#include // malloc, free, exit, atoi +#include // fprintf, perror, feof, fopen, etc. +#include // strlen, memset, strcat +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed + +#include "zstd_seekable.h" + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc:"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize) +{ + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = fopen_orDie(outName, "wb"); + size_t const buffInSize = ZSTD_CStreamInSize(); /* can always read one full block */ + void* const buffIn = malloc_orDie(buffInSize); + size_t const buffOutSize = ZSTD_CStreamOutSize(); /* can always flush a full block */ + void* const buffOut = malloc_orDie(buffOutSize); + + ZSTD_seekable_CStream* const cstream = ZSTD_seekable_createCStream(); + if (cstream==NULL) 
{ fprintf(stderr, "ZSTD_seekable_createCStream() error \n"); exit(10); } + size_t const initResult = ZSTD_seekable_initCStream(cstream, cLevel, 1, frameSize); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_initCStream() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t read, toRead = buffInSize; + while( (read = fread_orDie(buffIn, toRead, fin)) ) { + ZSTD_inBuffer input = { buffIn, read, 0 }; + while (input.pos < input.size) { + ZSTD_outBuffer output = { buffOut, buffOutSize, 0 }; + toRead = ZSTD_seekable_compressStream(cstream, &output , &input); /* toRead is guaranteed to be <= ZSTD_CStreamInSize() */ + if (ZSTD_isError(toRead)) { fprintf(stderr, "ZSTD_seekable_compressStream() error : %s \n", ZSTD_getErrorName(toRead)); exit(12); } + if (toRead > buffInSize) toRead = buffInSize; /* Safely handle case when `buffInSize` is manually changed to a value < ZSTD_CStreamInSize()*/ + fwrite_orDie(buffOut, output.pos, fout); + } + } + + while (1) { + ZSTD_outBuffer output = { buffOut, buffOutSize, 0 }; + size_t const remainingToFlush = ZSTD_seekable_endStream(cstream, &output); /* close stream */ + if (ZSTD_isError(remainingToFlush)) { fprintf(stderr, "ZSTD_seekable_endStream() error : %s \n", ZSTD_getErrorName(remainingToFlush)); exit(13); } + fwrite_orDie(buffOut, output.pos, fout); + if (!remainingToFlush) break; + } + + ZSTD_seekable_freeCStream(cstream); + fclose_orDie(fout); + fclose_orDie(fin); + free(buffIn); + free(buffOut); +} + +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} + +int main(int argc, const char** argv) { + const char* const exeName = argv[0]; + if (argc!=3) { + printf("wrong arguments\n"); + printf("usage:\n"); + printf("%s FILE FRAME_SIZE\n", exeName); + return 1; + } + + { const char* const inFileName = argv[1]; + unsigned const frameSize = (unsigned)atoi(argv[2]); + + const char* const outFileName = createOutFilename_orDie(inFileName); + compressFile_orDie(inFileName, outFileName, 5, frameSize); + } + + return 0; +} Property changes on: vendor/zstd/dist/contrib/seekable_format/examples/seekable_compression.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/examples/seekable_decompression.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/examples/seekable_decompression.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/examples/seekable_decompression.c (revision 325597) @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ */ + + +#include // malloc, exit +#include // fprintf, perror, feof +#include // strerror +#include // errno +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include + +#include "zstd_seekable.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc"); + exit(1); +} + +static void* realloc_orDie(void* ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr) return ptr; + /* error */ + perror("realloc"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) { + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + + +static void decompressFile_orDie(const char* fname, unsigned startOffset, unsigned endOffset) +{ + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = stdout; + size_t const buffOutSize = ZSTD_DStreamOutSize(); /* Guarantee to successfully flush at least one complete compressed block in all circumstances. 
*/ + void* const buffOut = malloc_orDie(buffOutSize); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + while (startOffset < endOffset) { + size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset); + + if (ZSTD_isError(result)) { + fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n", + ZSTD_getErrorName(result)); + exit(12); + } + fwrite_orDie(buffOut, result, fout); + startOffset += result; + } + + ZSTD_seekable_free(seekable); + fclose_orDie(fin); + fclose_orDie(fout); + free(buffOut); +} + + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc!=4) { + fprintf(stderr, "wrong arguments\n"); + fprintf(stderr, "usage:\n"); + fprintf(stderr, "%s FILE START END\n", exeName); + return 1; + } + + { + const char* const inFilename = argv[1]; + unsigned const startOffset = (unsigned) atoi(argv[2]); + unsigned const endOffset = (unsigned) atoi(argv[3]); + decompressFile_orDie(inFilename, startOffset, endOffset); + } + + return 0; +} Property changes on: vendor/zstd/dist/contrib/seekable_format/examples/seekable_decompression.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/zstd_seekable.h =================================================================== --- vendor/zstd/dist/contrib/seekable_format/zstd_seekable.h (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/zstd_seekable.h (revision 325597) @@ -0,0 +1,184 @@ +#ifndef SEEKABLE_H +#define SEEKABLE_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include + +static const unsigned ZSTD_seekTableFooterSize = 9; + +#define ZSTD_SEEKABLE_MAGICNUMBER 0x8F92EAB1 + +#define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U + +/* Limit the maximum size to avoid any potential issues storing the compressed size */ +#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U + +/*-**************************************************************************** +* Seekable Format +* +* The seekable format splits the compressed data into a series of "frames", +* each compressed individually so that decompression of a section in the +* middle of an archive only requires zstd to decompress at most a frame's +* worth of extra data, instead of the entire archive. +******************************************************************************/ + +typedef struct ZSTD_seekable_CStream_s ZSTD_seekable_CStream; +typedef struct ZSTD_seekable_s ZSTD_seekable; + +/*-**************************************************************************** +* Seekable compression - HowTo +* A ZSTD_seekable_CStream object is required to tracking streaming operation. +* Use ZSTD_seekable_createCStream() and ZSTD_seekable_freeCStream() to create/ +* release resources. +* +* Streaming objects are reusable to avoid allocation and deallocation, +* to start a new compression operation call ZSTD_seekable_initCStream() on the +* compressor. 
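+*
+* As an illustrative sketch of the whole flow described below (src, srcSize,
+* dstBuff and dstBuffSize are placeholder names; error-code checks omitted),
+* a complete compression pass:
+*
+*     ZSTD_seekable_CStream* const zcs = ZSTD_seekable_createCStream();
+*     ZSTD_seekable_initCStream(zcs, 5, 1, 1024*1024);  // level 5, checksums on, 1 MB frames
+*     ZSTD_inBuffer in = { src, srcSize, 0 };
+*     while (in.pos < in.size) {
+*         ZSTD_outBuffer out = { dstBuff, dstBuffSize, 0 };
+*         ZSTD_seekable_compressStream(zcs, &out, &in);
+*         // write the first out.pos bytes of dstBuff to the file
+*     }
+*     for (;;) {
+*         ZSTD_outBuffer out = { dstBuff, dstBuffSize, 0 };
+*         size_t const remaining = ZSTD_seekable_endStream(zcs, &out);
+*         // write the first out.pos bytes of dstBuff to the file
+*         if (remaining == 0) break;
+*     }
+*     ZSTD_seekable_freeCStream(zcs);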
+* +* Data streamed to the seekable compressor will automatically be split into +* frames of size `maxFrameSize` (provided in ZSTD_seekable_initCStream()), +* or if none is provided, will be cut off whenever ZSTD_seekable_endFrame() is +* called or when the default maximum frame size (2GB) is reached. +* +* Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object +* for a new compression operation. +* `maxFrameSize` indicates the size at which to automatically start a new +* seekable frame. `maxFrameSize == 0` implies the default maximum size. +* `checksumFlag` indicates whether or not the seek table should include frame +* checksums on the uncompressed data for verification. +* @return : a size hint for input to provide for compression, or an error code +* checkable with ZSTD_isError() +* +* Use ZSTD_seekable_compressStream() repetitively to consume input stream. +* The function will automatically update both `pos` fields. +* Note that it may not consume the entire input, in which case `pos < size`, +* and it's up to the caller to present again remaining data. +* @return : a size hint, preferred nb of bytes to use as input for next +* function call or an error code, which can be tested using +* ZSTD_isError(). +* Note 1 : it's just a hint, to help latency a little, any other +* value will work fine. +* +* At any time, call ZSTD_seekable_endFrame() to end the current frame and +* start a new one. +* +* ZSTD_seekable_endStream() will end the current frame, and then write the seek +* table so that decompressors can efficiently find compressed frames. +* ZSTD_seekable_endStream() may return a number > 0 if it was unable to flush +* all the necessary data to `output`. In this case, it should be called again +* until all remaining data is flushed out and 0 is returned. +******************************************************************************/ + +/*===== Seekable compressor management =====*/ +ZSTDLIB_API ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void); +ZSTDLIB_API size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs); + +/*===== Seekable compression functions =====*/ +ZSTDLIB_API size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, int compressionLevel, int checksumFlag, unsigned maxFrameSize); +ZSTDLIB_API size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); +ZSTDLIB_API size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output); +ZSTDLIB_API size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output); + +/*= Raw seek table API + * These functions allow for the seek table to be constructed directly. + * This table can then be appended to a file of concatenated frames. + * This allows the frames to be compressed independently, even in parallel, + * and compiled together afterward into a seekable archive. + * + * Use ZSTD_seekable_createFrameLog() to allocate and initialize a tracking + * structure. + * + * Call ZSTD_seekable_logFrame() once for each frame in the archive. + * checksum is optional, and will not be used if checksumFlag was 0 when the + * frame log was created. If present, it should be the least significant 32 + * bits of the XXH64 hash of the uncompressed data. + * + * Call ZSTD_seekable_writeSeekTable to serialize the data into a seek table. + * If the entire table was written, the return value will be 0. Otherwise, + * it will be equal to the number of bytes left to write. 
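+*
+* A minimal sketch of that flow (cSizes, dSizes, numFrames, buff and buffSize
+* are placeholders; error-code checks omitted):
+*
+*     ZSTD_frameLog* const fl = ZSTD_seekable_createFrameLog(0);  // no checksums
+*     unsigned i;
+*     for (i = 0; i < numFrames; i++)
+*         ZSTD_seekable_logFrame(fl, cSizes[i], dSizes[i], 0);
+*     for (;;) {
+*         ZSTD_outBuffer out = { buff, buffSize, 0 };
+*         size_t const left = ZSTD_seekable_writeSeekTable(fl, &out);
+*         // append the first out.pos bytes of buff to the concatenated frames
+*         if (left == 0) break;
+*     }
+*     ZSTD_seekable_freeFrameLog(fl);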
*/ +typedef struct ZSTD_frameLog_s ZSTD_frameLog; +ZSTDLIB_API ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag); +ZSTDLIB_API size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl); +ZSTDLIB_API size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, unsigned compressedSize, unsigned decompressedSize, unsigned checksum); +ZSTDLIB_API size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output); + +/*-**************************************************************************** +* Seekable decompression - HowTo +* A ZSTD_seekable object is required to tracking the seekTable. +* +* Call ZSTD_seekable_init* to initialize a ZSTD_seekable object with the +* the seek table provided in the input. +* There are three modes for ZSTD_seekable_init: +* - ZSTD_seekable_initBuff() : An in-memory API. The data contained in +* `src` should be the entire seekable file, including the seek table. +* `src` should be kept alive and unmodified until the ZSTD_seekable object +* is freed or reset. +* - ZSTD_seekable_initFile() : A simplified file API using stdio. fread and +* fseek will be used to access the required data for building the seek +* table and doing decompression operations. `src` should not be closed +* or modified until the ZSTD_seekable object is freed or reset. +* - ZSTD_seekable_initAdvanced() : A general API allowing the client to +* provide its own read and seek callbacks. +* + ZSTD_seekable_read() : read exactly `n` bytes into `buffer`. +* Premature EOF should be treated as an error. +* + ZSTD_seekable_seek() : seek the read head to `offset` from `origin`, +* where origin is either SEEK_SET (beginning of +* file), or SEEK_END (end of file). +* Both functions should return a non-negative value in case of success, and a +* negative value in case of failure. If implementing using this API and +* stdio, be careful with files larger than 4GB and fseek. All of these +* functions return an error code checkable with ZSTD_isError(). +* +* Call ZSTD_seekable_decompress to decompress `dstSize` bytes at decompressed +* offset `offset`. ZSTD_seekable_decompress may have to decompress the entire +* prefix of the frame before the desired data if it has not already processed +* this section. If ZSTD_seekable_decompress is called multiple times for a +* consecutive range of data, it will efficiently retain the decompressor object +* and avoid redecompressing frame prefixes. The return value is the number of +* bytes decompressed, or an error code checkable with ZSTD_isError(). +* +* The seek table access functions can be used to obtain the data contained +* in the seek table. If frameIndex is larger than the value returned by +* ZSTD_seekable_getNumFrames(), they will return error codes checkable with +* ZSTD_isError(). Note that since the offset access functions return +* unsigned long long instead of size_t, in this case they will instead return +* the value ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE. 
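+*
+* For illustration (fin is an already-open FILE*, dst/len/offset are
+* placeholders, and error-code checks are omitted), reading an arbitrary
+* decompressed range from a seekable file:
+*
+*     ZSTD_seekable* const zs = ZSTD_seekable_create();
+*     ZSTD_seekable_initFile(zs, fin);
+*     ZSTD_seekable_decompress(zs, dst, len, offset);  // len bytes at decompressed offset
+*     ZSTD_seekable_free(zs);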
+******************************************************************************/ + +/*===== Seekable decompressor management =====*/ +ZSTDLIB_API ZSTD_seekable* ZSTD_seekable_create(void); +ZSTDLIB_API size_t ZSTD_seekable_free(ZSTD_seekable* zs); + +/*===== Seekable decompression functions =====*/ +ZSTDLIB_API size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize); +ZSTDLIB_API size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src); +ZSTDLIB_API size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned long long offset); +ZSTDLIB_API size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex); + +#define ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE (0ULL-2) +/*===== Seek Table access functions =====*/ +ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs); +ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long offset); + +/*===== Seekable advanced I/O API =====*/ +typedef int(ZSTD_seekable_read)(void* opaque, void* buffer, size_t n); +typedef int(ZSTD_seekable_seek)(void* opaque, long long offset, int origin); +typedef struct { + void* opaque; + ZSTD_seekable_read* read; + ZSTD_seekable_seek* seek; +} ZSTD_seekable_customFile; +ZSTDLIB_API size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src); + +#if defined (__cplusplus) +} +#endif + +#endif Property changes on: vendor/zstd/dist/contrib/seekable_format/zstd_seekable.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/zstd_seekable_compression_format.md =================================================================== --- vendor/zstd/dist/contrib/seekable_format/zstd_seekable_compression_format.md (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/zstd_seekable_compression_format.md (revision 325597) @@ -0,0 +1,116 @@ +# Zstandard Seekable Format + +### Notices + +Copyright (c) 2017-present Facebook, Inc. + +Permission is granted to copy and distribute this document +for any purpose and without charge, +including translations into other languages +and incorporation into compilations, +provided that the copyright notice and this notice are preserved, +and that any substantive changes or deletions from the original +are clearly marked. +Distribution of this document is unlimited. + +### Version +0.1.0 (11/04/17) + +## Introduction +This document defines a format for compressed data to be stored so that subranges of the data can be efficiently decompressed without requiring the entire document to be decompressed. +This is done by splitting up the input data into frames, +each of which are compressed independently, +and so can be decompressed independently. 
+Decompression then takes advantage of a provided 'seek table', which allows the decompressor to immediately jump to the desired data. This is done in a way that is compatible with the original Zstandard format by placing the seek table in a Zstandard skippable frame. + +### Overall conventions +In this document: +- square brackets i.e. `[` and `]` are used to indicate optional fields or parameters. +- the naming convention for identifiers is `Mixed_Case_With_Underscores` +- All numeric fields are little-endian unless specified otherwise + +## Format + +The format consists of a number of frames (Zstandard compressed frames and skippable frames), followed by a final skippable frame at the end containing the seek table. + +### Seek Table Format +The structure of the seek table frame is as follows: + +|`Skippable_Magic_Number`|`Frame_Size`|`[Seek_Table_Entries]`|`Seek_Table_Footer`| +|------------------------|------------|----------------------|-------------------| +| 4 bytes | 4 bytes | 8-12 bytes each | 9 bytes | + +__`Skippable_Magic_Number`__ + +Value : 0x184D2A5E. +This is for compatibility with [Zstandard skippable frames]. +Since it is legal for other Zstandard skippable frames to use the same +magic number, it is not recommended for a decoder to recognize frames +solely on this. + +__`Frame_Size`__ + +The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`. +This is for compatibility with [Zstandard skippable frames]. + +[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames + +#### `Seek_Table_Footer` +The seek table footer format is as follows: + +|`Number_Of_Frames`|`Seek_Table_Descriptor`|`Seekable_Magic_Number`| +|------------------|-----------------------|-----------------------| +| 4 bytes | 1 byte | 4 bytes | + +__`Seekable_Magic_Number`__ + +Value : 0x8F92EAB1. +This value must be the last bytes present in the compressed file so that decoders +can efficiently find it and determine if there is an actual seek table present. + +__`Number_Of_Frames`__ + +The number of stored frames in the data. + +__`Seek_Table_Descriptor`__ + +A bitfield describing the format of the seek table. + +| Bit number | Field name | +| ---------- | ---------- | +| 7 | `Checksum_Flag` | +| 6-2 | `Reserved_Bits` | +| 1-0 | `Unused_Bits` | + +While only `Checksum_Flag` currently exists, there are 7 other bits in this field that can be used for future changes to the format, +for example the addition of inline dictionaries. + +__`Checksum_Flag`__ + +If the checksum flag is set, each of the seek table entries contains a 4 byte checksum of the uncompressed data contained in its frame. + +`Reserved_Bits` are not currently used but may be used in the future for breaking changes, so a compliant decoder should ensure they are set to 0. `Unused_Bits` may be used in the future for non-breaking changes, so a compliant decoder should not interpret these bits. + +#### __`Seek_Table_Entries`__ + +`Seek_Table_Entries` consists of `Number_Of_Frames` (one for each frame in the data, not including the seek table frame) entries of the following form, in sequence: + +|`Compressed_Size`|`Decompressed_Size`|`[Checksum]`| +|-----------------|-------------------|------------| +| 4 bytes | 4 bytes | 4 bytes | + +__`Compressed_Size`__ + +The compressed size of the frame. +The cumulative sum of the `Compressed_Size` fields of frames `0` to `i` gives the offset in the compressed file of frame `i+1`. 
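+
+For example, with three frames whose `Compressed_Size` fields are `100`, `250`
+and `75`, frame `0` starts at offset `0` in the compressed file, frame `1` at
+offset `100`, and frame `2` at offset `100 + 250 = 350`.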
+ +__`Decompressed_Size`__ + +The size of the decompressed data contained in the frame. For skippable or otherwise empty frames, this value is 0. + +__`Checksum`__ + +Only present if `Checksum_Flag` is set in the `Seek_Table_Descriptor`. Value : the least significant 32 bits of the XXH64 digest of the uncompressed data, stored in little-endian format. + +## Version Changes +- 0.1.0: initial version Index: vendor/zstd/dist/contrib/seekable_format/zstdseek_compress.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/zstdseek_compress.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/zstdseek_compress.c (revision 325597) @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#include /* malloc, free */ + +#define XXH_STATIC_LINKING_ONLY +#define XXH_NAMESPACE ZSTD_ +#include "xxhash.h" + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" +#include "mem.h" +#include "zstd_seekable.h" + +#define CHECK_Z(f) { size_t const ret = (f); if (ret != 0) return ret; } + +#undef ERROR +#define ERROR(name) ((size_t)-ZSTD_error_##name) + +#undef MIN +#undef MAX +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +typedef struct { + U32 cSize; + U32 dSize; + U32 checksum; +} framelogEntry_t; + +struct ZSTD_frameLog_s { + framelogEntry_t* entries; + U32 size; + U32 capacity; + + int checksumFlag; + + /* for use when streaming out the seek table */ + U32 seekTablePos; + U32 seekTableIndex; +} framelog_t; + +struct ZSTD_seekable_CStream_s { + ZSTD_CStream* cstream; + ZSTD_frameLog framelog; + + U32 frameCSize; + U32 frameDSize; + + XXH64_state_t xxhState; + + U32 maxFrameSize; + + int writingSeekTable; +}; + +size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl) +{ + /* allocate some initial space */ + size_t const FRAMELOG_STARTING_CAPACITY = 16; + fl->entries = (framelogEntry_t*)malloc( + sizeof(framelogEntry_t) * FRAMELOG_STARTING_CAPACITY); + if (fl->entries == NULL) return ERROR(memory_allocation); + fl->capacity = FRAMELOG_STARTING_CAPACITY; + + return 0; +} + +size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl) +{ + if (fl != NULL) free(fl->entries); + return 0; +} + +ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag) +{ + ZSTD_frameLog* fl = malloc(sizeof(ZSTD_frameLog)); + if (fl == NULL) return NULL; + + if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(fl))) { + free(fl); + return NULL; + } + + fl->checksumFlag = checksumFlag; + fl->seekTablePos = 0; + fl->seekTableIndex = 0; + fl->size = 0; + + return fl; +} + +size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl) +{ + ZSTD_seekable_frameLog_freeVec(fl); + free(fl); + return 0; +} + +ZSTD_seekable_CStream* ZSTD_seekable_createCStream() +{ + ZSTD_seekable_CStream* zcs = malloc(sizeof(ZSTD_seekable_CStream)); + + if (zcs == NULL) return NULL; + + memset(zcs, 0, sizeof(*zcs)); + + zcs->cstream = ZSTD_createCStream(); + if (zcs->cstream == NULL) goto failed1; + + if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(&zcs->framelog))) goto failed2; + + return zcs; + +failed2: + ZSTD_freeCStream(zcs->cstream); +failed1: + free(zcs); + return NULL; +} + +size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs) +{ + if (zcs == 
NULL) return 0; /* support free on null */ + ZSTD_freeCStream(zcs->cstream); + ZSTD_seekable_frameLog_freeVec(&zcs->framelog); + free(zcs); + + return 0; +} + +size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, + int compressionLevel, + int checksumFlag, + U32 maxFrameSize) +{ + zcs->framelog.size = 0; + zcs->frameCSize = 0; + zcs->frameDSize = 0; + + /* make sure maxFrameSize has a reasonable value */ + if (maxFrameSize > ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE) { + return ERROR(compressionParameter_unsupported); + } + + zcs->maxFrameSize = maxFrameSize + ? maxFrameSize + : ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE; + + zcs->framelog.checksumFlag = checksumFlag; + if (zcs->framelog.checksumFlag) { + XXH64_reset(&zcs->xxhState, 0); + } + + zcs->framelog.seekTablePos = 0; + zcs->framelog.seekTableIndex = 0; + zcs->writingSeekTable = 0; + + return ZSTD_initCStream(zcs->cstream, compressionLevel); +} + +size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, + unsigned compressedSize, + unsigned decompressedSize, + unsigned checksum) +{ + if (fl->size == ZSTD_SEEKABLE_MAXFRAMES) + return ERROR(frameIndex_tooLarge); + + /* grow the buffer if required */ + if (fl->size == fl->capacity) { + /* exponential size increase for constant amortized runtime */ + size_t const newCapacity = fl->capacity * 2; + framelogEntry_t* const newEntries = realloc(fl->entries, + sizeof(framelogEntry_t) * newCapacity); + + if (newEntries == NULL) return ERROR(memory_allocation); + + fl->entries = newEntries; + fl->capacity = newCapacity; + } + + fl->entries[fl->size] = (framelogEntry_t){ + compressedSize, decompressedSize, checksum + }; + fl->size++; + + return 0; +} + +size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) +{ + size_t const prevOutPos = output->pos; + /* end the frame */ + size_t ret = ZSTD_endStream(zcs->cstream, output); + + zcs->frameCSize += output->pos - prevOutPos; + + /* need to flush before doing the rest */ + if (ret) return ret; + + /* frame done */ + + /* store the frame data for later */ + ret = ZSTD_seekable_logFrame( + &zcs->framelog, zcs->frameCSize, zcs->frameDSize, + zcs->framelog.checksumFlag + ? 
XXH64_digest(&zcs->xxhState) & 0xFFFFFFFFU + : 0); + if (ret) return ret; + + /* reset for the next frame */ + zcs->frameCSize = 0; + zcs->frameDSize = 0; + + ZSTD_resetCStream(zcs->cstream, 0); + if (zcs->framelog.checksumFlag) + XXH64_reset(&zcs->xxhState, 0); + + return 0; +} + +size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const BYTE* const inBase = (const BYTE*) input->src + input->pos; + size_t inLen = input->size - input->pos; + + inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize)); + + /* if we haven't finished flushing the last frame, don't start writing a new one */ + if (inLen > 0) { + ZSTD_inBuffer inTmp = { inBase, inLen, 0 }; + size_t const prevOutPos = output->pos; + + size_t const ret = ZSTD_compressStream(zcs->cstream, output, &inTmp); + + if (zcs->framelog.checksumFlag) { + XXH64_update(&zcs->xxhState, inBase, inTmp.pos); + } + + zcs->frameCSize += output->pos - prevOutPos; + zcs->frameDSize += inTmp.pos; + + input->pos += inTmp.pos; + + if (ZSTD_isError(ret)) return ret; + } + + if (zcs->maxFrameSize == zcs->frameDSize) { + /* log the frame and start over */ + size_t const ret = ZSTD_seekable_endFrame(zcs, output); + if (ZSTD_isError(ret)) return ret; + + /* get the client ready for the next frame */ + return (size_t)zcs->maxFrameSize; + } + + return (size_t)(zcs->maxFrameSize - zcs->frameDSize); +} + +static inline size_t ZSTD_seekable_seekTableSize(const ZSTD_frameLog* fl) +{ + size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0); + size_t const seekTableLen = ZSTD_skippableHeaderSize + + sizePerFrame * fl->size + + ZSTD_seekTableFooterSize; + + return seekTableLen; +} + +static inline size_t ZSTD_stwrite32(ZSTD_frameLog* fl, + ZSTD_outBuffer* output, U32 const value, + U32 const offset) +{ + if (fl->seekTablePos < offset + 4) { + BYTE tmp[4]; /* so that we can work with buffers too small to write a whole word to */ + size_t const lenWrite = + MIN(output->size - output->pos, offset + 4 - fl->seekTablePos); + MEM_writeLE32(tmp, value); + memcpy((BYTE*)output->dst + output->pos, + tmp + (fl->seekTablePos - offset), lenWrite); + output->pos += lenWrite; + fl->seekTablePos += lenWrite; + + if (lenWrite < 4) return ZSTD_seekable_seekTableSize(fl) - fl->seekTablePos; + } + return 0; +} + +size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) +{ + /* seekTableIndex: the current index in the table and + * seekTableSize: the amount of the table written so far + * + * This function is written this way so that if it has to return early + * because of a small buffer, it can keep going where it left off. 
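+ *
+ * For example (an illustrative scenario): if `output` has only 2 bytes of
+ * space left, ZSTD_stwrite32() copies just the first 2 bytes of its 4-byte
+ * little-endian value, advances fl->seekTablePos past them, and returns the
+ * number of seek table bytes still left to write; the next call with a fresh
+ * output buffer resumes at the 2 remaining bytes.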
+ */ + + size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0); + size_t const seekTableLen = ZSTD_seekable_seekTableSize(fl); + + CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_MAGIC_SKIPPABLE_START | 0xE, 0)); + CHECK_Z(ZSTD_stwrite32(fl, output, seekTableLen - ZSTD_skippableHeaderSize, + 4)); + + while (fl->seekTableIndex < fl->size) { + CHECK_Z(ZSTD_stwrite32(fl, output, + fl->entries[fl->seekTableIndex].cSize, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 0)); + + CHECK_Z(ZSTD_stwrite32(fl, output, + fl->entries[fl->seekTableIndex].dSize, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 4)); + + if (fl->checksumFlag) { + CHECK_Z(ZSTD_stwrite32( + fl, output, fl->entries[fl->seekTableIndex].checksum, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 8)); + } + + fl->seekTableIndex++; + } + + CHECK_Z(ZSTD_stwrite32(fl, output, fl->size, + seekTableLen - ZSTD_seekTableFooterSize)); + + if (output->size - output->pos < 1) return seekTableLen - fl->seekTablePos; + if (fl->seekTablePos < seekTableLen - 4) { + BYTE sfd = 0; + sfd |= (fl->checksumFlag) << 7; + + ((BYTE*)output->dst)[output->pos] = sfd; + output->pos++; + fl->seekTablePos++; + } + + CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_SEEKABLE_MAGICNUMBER, + seekTableLen - 4)); + + if (fl->seekTablePos != seekTableLen) return ERROR(GENERIC); + return 0; +} + +size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) +{ + if (!zcs->writingSeekTable && zcs->frameDSize) { + const size_t endFrame = ZSTD_seekable_endFrame(zcs, output); + if (ZSTD_isError(endFrame)) return endFrame; + /* return an accurate size hint */ + if (endFrame) return endFrame + ZSTD_seekable_seekTableSize(&zcs->framelog); + } + + zcs->writingSeekTable = 1; + + return ZSTD_seekable_writeSeekTable(&zcs->framelog, output); +} Property changes on: vendor/zstd/dist/contrib/seekable_format/zstdseek_compress.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/contrib/seekable_format/zstdseek_decompress.c =================================================================== --- vendor/zstd/dist/contrib/seekable_format/zstdseek_decompress.c (nonexistent) +++ vendor/zstd/dist/contrib/seekable_format/zstdseek_decompress.c (revision 325597) @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */
+
+/* *********************************************************
+*  Turn on Large Files support (>4GB) for 32-bit Linux/Unix
+***********************************************************/
+#if !defined(__64BIT__) || defined(__MINGW32__)    /* No point defining Large file for 64 bit but MinGW-w64 requires it */
+#  if !defined(_FILE_OFFSET_BITS)
+#    define _FILE_OFFSET_BITS 64   /* turn off_t into a 64-bit type for ftello, fseeko */
+#  endif
+#  if !defined(_LARGEFILE_SOURCE)  /* obsolete macro, replaced with _FILE_OFFSET_BITS */
+#    define _LARGEFILE_SOURCE 1    /* Large File Support extension (LFS) - fseeko, ftello */
+#  endif
+#  if defined(_AIX) || defined(__hpux)
+#    define _LARGE_FILES           /* Large file support on 32-bits AIX and HP-UX */
+#  endif
+#endif
+
+/* ************************************************************
+* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
+***************************************************************/
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+#  define LONG_SEEK _fseeki64
+#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
+#  define LONG_SEEK fseeko
+#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
+#  define LONG_SEEK fseeko64
+#elif defined(_WIN32) && !defined(__DJGPP__)
+#  include <windows.h>
+   static int LONG_SEEK(FILE* file, __int64 offset, int origin) {
+       LARGE_INTEGER off;
+       DWORD method;
+       off.QuadPart = offset;
+       if (origin == SEEK_END)
+           method = FILE_END;
+       else if (origin == SEEK_CUR)
+           method = FILE_CURRENT;
+       else
+           method = FILE_BEGIN;
+
+       if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method))
+           return 0;
+       else
+           return -1;
+   }
+#else
+#  define LONG_SEEK fseek
+#endif
+
+#include <stdlib.h> /* malloc, free */
+#include <stdio.h>  /* FILE* */
+
+#define XXH_STATIC_LINKING_ONLY
+#define XXH_NAMESPACE ZSTD_
+#include "xxhash.h"
+
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#include "zstd_errors.h"
+#include "mem.h"
+#include "zstd_seekable.h"
+
+#undef ERROR
+#define ERROR(name) ((size_t)-ZSTD_error_##name)
+
+#define CHECK_IO(f) { int const errcod = (f); if (errcod < 0) return ERROR(seekableIO); }
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
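+
+/* Error-handling convention: functions in this file report failure by
+ * returning a size_t error code produced by the ERROR() macro above.
+ * Callers distinguish errors from valid sizes with ZSTD_isError(), e.g.
+ * (illustrative snippet, `zs` and `f` being a caller's seekable object
+ * and input file):
+ *
+ *   size_t const ret = ZSTD_seekable_initFile(zs, f);
+ *   if (ZSTD_isError(ret)) { ... }
+ */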
+
+/* Special-case callbacks for FILE* and in-memory modes, so that we can treat
+ * them the same way as the advanced API */
+static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n)
+{
+    size_t const result = fread(buffer, 1, n, (FILE*)opaque);
+    if (result != n) {
+        return -1;
+    }
+    return 0;
+}
+
+static int ZSTD_seekable_seek_FILE(void* opaque, S64 offset, int origin)
+{
+    int const ret = LONG_SEEK((FILE*)opaque, offset, origin);
+    if (ret) return ret;
+    return fflush((FILE*)opaque);
+}
+
+typedef struct {
+    const void *ptr;
+    size_t size;
+    size_t pos;
+} buffWrapper_t;
+
+static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n)
+{
+    buffWrapper_t* buff = (buffWrapper_t*) opaque;
+    if (buff->pos + n > buff->size) return -1; /* reading past the end of the buffer */
+    memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n);
+    buff->pos += n;
+    return 0;
+}
+
+static int ZSTD_seekable_seek_buff(void* opaque, S64 offset, int origin)
+{
+    buffWrapper_t* buff = (buffWrapper_t*) opaque;
+    S64 newOffset;
+    switch (origin) {
+    case SEEK_SET:
+        newOffset = offset;
+        break;
+    case SEEK_CUR:
+        newOffset = (S64)buff->pos + offset;
+        break;
+    case SEEK_END:
+        /* as with fseek(), a position relative to the end is reached with a
+         * non-positive offset */
+        newOffset = (S64)buff->size + offset;
+        break;
+    default:
+        return -1;
+    }
+    if (newOffset < 0 || newOffset > (S64)buff->size) {
+        return -1;
+    }
+    buff->pos = (size_t)newOffset;
+    return 0;
+}
+
+typedef struct {
+    U64 cOffset;
+    U64 dOffset;
+    U32 checksum;
+} seekEntry_t;
+
+typedef struct {
+    seekEntry_t* entries;
+    size_t tableLen;
+
+    int checksumFlag;
+} seekTable_t;
+
+#define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_ABSOLUTEMAX
+
+struct ZSTD_seekable_s {
+    ZSTD_DStream* dstream;
+    seekTable_t seekTable;
+    ZSTD_seekable_customFile src;
+
+    U64 decompressedOffset;
+    U32 curFrame;
+
+    BYTE inBuff[SEEKABLE_BUFF_SIZE]; /* need to do our own input buffering */
+    BYTE outBuff[SEEKABLE_BUFF_SIZE]; /* so we can efficiently decompress the
+                                         starts of chunks before we get to the
+                                         desired section */
+    ZSTD_inBuffer in; /* maintain continuity across ZSTD_seekable_decompress operations */
+    buffWrapper_t buffWrapper; /* for `src.opaque` in in-memory mode */
+
+    XXH64_state_t xxhState;
+};
+
+ZSTD_seekable* ZSTD_seekable_create(void)
+{
+    ZSTD_seekable* zs = malloc(sizeof(ZSTD_seekable));
+
+    if (zs == NULL) return NULL;
+
+    /* also initializes stage to zsds_init */
+    memset(zs, 0, sizeof(*zs));
+
+    zs->dstream = ZSTD_createDStream();
+    if (zs->dstream == NULL) {
+        free(zs);
+        return NULL;
+    }
+
+    return zs;
+}
+
+size_t ZSTD_seekable_free(ZSTD_seekable* zs)
+{
+    if (zs == NULL) return 0; /* support free on null */
+    ZSTD_freeDStream(zs->dstream);
+    free(zs->seekTable.entries);
+    free(zs);
+
+    return 0;
+}
+
+/** ZSTD_seekable_offsetToFrameIndex() :
+ *  Performs a binary search to find the last frame with a decompressed offset
+ *  <= pos
+ *  @return : the frame's index */
+U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, U64 pos)
+{
+    U32 lo = 0;
+    U32 hi = zs->seekTable.tableLen;
+
+    if (pos >= zs->seekTable.entries[zs->seekTable.tableLen].dOffset) {
+        return zs->seekTable.tableLen;
+    }
+
+    while (lo + 1 < hi) {
+        U32 const mid = lo + ((hi - lo) >> 1);
+        if (zs->seekTable.entries[mid].dOffset <= pos) {
+            lo = mid;
+        } else {
+            hi = mid;
+        }
+    }
+    return lo;
+}
+
+U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
+{
+    return zs->seekTable.tableLen;
+}
+
+U64 ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
+    return zs->seekTable.entries[frameIndex].cOffset;
+}
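+
+/* Illustrative sketch (not part of the library): the frame-table accessors
+ * above and below can enumerate the frames of an archive once a ZSTD_seekable
+ * has been initialized (see the init functions further down):
+ *
+ *   U32 const numFrames = ZSTD_seekable_getNumFrames(zs);
+ *   U32 i;
+ *   for (i = 0; i < numFrames; i++) {
+ *       U64 const cOff = ZSTD_seekable_getFrameCompressedOffset(zs, i);
+ *       U64 const dOff = ZSTD_seekable_getFrameDecompressedOffset(zs, i);
+ *       // frame i holds the decompressed range starting at dOff,
+ *       // stored at compressed offset cOff
+ *   }
+ */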
+
+U64 ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
+    return zs->seekTable.entries[frameIndex].dOffset;
+}
+
+size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+    return zs->seekTable.entries[frameIndex + 1].cOffset -
+           zs->seekTable.entries[frameIndex].cOffset;
+}
+
+size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+    return zs->seekTable.entries[frameIndex + 1].dOffset -
+           zs->seekTable.entries[frameIndex].dOffset;
+}
+
+static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs)
+{
+    int checksumFlag;
+    ZSTD_seekable_customFile src = zs->src;
+    /* read the footer, fixed size */
+    CHECK_IO(src.seek(src.opaque, -(int)ZSTD_seekTableFooterSize, SEEK_END));
+    CHECK_IO(src.read(src.opaque, zs->inBuff, ZSTD_seekTableFooterSize));
+
+    if (MEM_readLE32(zs->inBuff + 5) != ZSTD_SEEKABLE_MAGICNUMBER) {
+        return ERROR(prefix_unknown);
+    }
+
+    {   BYTE const sfd = zs->inBuff[4];
+        checksumFlag = sfd >> 7;
+
+        /* check the reserved bits of the seek-table descriptor */
+        if ((sfd >> 2) & 0x1f) {
+            return ERROR(corruption_detected);
+        }
+    }
+
+    {   U32 const numFrames = MEM_readLE32(zs->inBuff);
+        U32 const sizePerEntry = 8 + (checksumFlag?4:0);
+        U32 const tableSize = sizePerEntry * numFrames;
+        U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_skippableHeaderSize;
+
+        U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */
+        {
+            U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
+
+            CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END));
+            CHECK_IO(src.read(src.opaque, zs->inBuff, toRead));
+
+            remaining -= toRead;
+        }
+
+        if (MEM_readLE32(zs->inBuff) != (ZSTD_MAGIC_SKIPPABLE_START | 0xE)) {
+            return ERROR(prefix_unknown);
+        }
+        if (MEM_readLE32(zs->inBuff+4) + ZSTD_skippableHeaderSize != frameSize) {
+            return ERROR(prefix_unknown);
+        }
+
+        {   /* Allocate an extra entry at the end so that we can do size
+             * computations on the last element without special case */
+            seekEntry_t* entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1));
+
+            U32 idx = 0;
+            U32 pos = 8;
+
+            U64 cOffset = 0;
+            U64 dOffset = 0;
+
+            if (entries == NULL) {
+                return ERROR(memory_allocation);
+            }
+
+            /* compute cumulative positions */
+            for (; idx < numFrames; idx++) {
+                if (pos + sizePerEntry > SEEKABLE_BUFF_SIZE) {
+                    U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
+                    U32 const offset = SEEKABLE_BUFF_SIZE - pos;
+                    memmove(zs->inBuff, zs->inBuff + pos, offset); /* move any data we haven't read yet */
+                    CHECK_IO(src.read(src.opaque, zs->inBuff+offset, toRead));
+                    remaining -= toRead;
+                    pos = 0;
+                }
+                entries[idx].cOffset = cOffset;
+                entries[idx].dOffset = dOffset;
+
+                cOffset += MEM_readLE32(zs->inBuff + pos);
+                pos += 4;
+                dOffset += MEM_readLE32(zs->inBuff + pos);
+                pos += 4;
+                if (checksumFlag) {
+                    entries[idx].checksum = MEM_readLE32(zs->inBuff + pos);
+                    pos += 4;
+                }
+            }
+            entries[numFrames].cOffset = cOffset;
+            entries[numFrames].dOffset = dOffset;
+
+            zs->seekTable.entries = entries;
+            zs->seekTable.tableLen = numFrames;
+            zs->seekTable.checksumFlag = checksumFlag;
+            return 0;
+        }
+    }
+}
+
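+/* Illustrative usage sketch (not part of the library): reading a byte range
+ * from a seekable archive on disk with the functions defined below. Error
+ * handling is abbreviated, and "archive.zst" stands in for any archive
+ * produced by the seekable compression API in zstdseek_compress.c:
+ *
+ *   FILE* const f = fopen("archive.zst", "rb");
+ *   ZSTD_seekable* const zs = ZSTD_seekable_create();
+ *   char buf[1024];
+ *   if (f != NULL && zs != NULL
+ *       && !ZSTD_isError(ZSTD_seekable_initFile(zs, f))) {
+ *       // decompress 1024 bytes starting at decompressed offset 4096;
+ *       // only the frames covering that range are read and decoded
+ *       size_t const r = ZSTD_seekable_decompress(zs, buf, sizeof(buf), 4096);
+ *       if (ZSTD_isError(r)) { ... }
+ *   }
+ *   ZSTD_seekable_free(zs);
+ *   if (f != NULL) fclose(f);
+ */
+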
+size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize) +{ + zs->buffWrapper = (buffWrapper_t){src, srcSize, 0}; + { ZSTD_seekable_customFile srcFile = {&zs->buffWrapper, + &ZSTD_seekable_read_buff, + &ZSTD_seekable_seek_buff}; + return ZSTD_seekable_initAdvanced(zs, srcFile); } +} + +size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src) +{ + ZSTD_seekable_customFile srcFile = {src, &ZSTD_seekable_read_FILE, + &ZSTD_seekable_seek_FILE}; + return ZSTD_seekable_initAdvanced(zs, srcFile); +} + +size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src) +{ + zs->src = src; + + { const size_t seekTableInit = ZSTD_seekable_loadSeekTable(zs); + if (ZSTD_isError(seekTableInit)) return seekTableInit; } + + zs->decompressedOffset = (U64)-1; + zs->curFrame = (U32)-1; + + { const size_t dstreamInit = ZSTD_initDStream(zs->dstream); + if (ZSTD_isError(dstreamInit)) return dstreamInit; } + return 0; +} + +size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, U64 offset) +{ + U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset); + do { + /* check if we can continue from a previous decompress job */ + if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) { + zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset; + zs->curFrame = targetFrame; + + CHECK_IO(zs->src.seek(zs->src.opaque, + zs->seekTable.entries[targetFrame].cOffset, + SEEK_SET)); + zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0}; + XXH64_reset(&zs->xxhState, 0); + ZSTD_resetDStream(zs->dstream); + } + + while (zs->decompressedOffset < offset + len) { + size_t toRead; + ZSTD_outBuffer outTmp; + size_t prevOutPos; + if (zs->decompressedOffset < offset) { + /* dummy decompressions until we get to the target offset */ + outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0}; + } else { + outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset}; + } + + prevOutPos = outTmp.pos; + toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in); + if (ZSTD_isError(toRead)) { + return toRead; + } + + if (zs->seekTable.checksumFlag) { + XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos, + outTmp.pos - prevOutPos); + } + zs->decompressedOffset += outTmp.pos - prevOutPos; + + if (toRead == 0) { + /* frame complete */ + + /* verify checksum */ + if (zs->seekTable.checksumFlag && + (XXH64_digest(&zs->xxhState) & 0xFFFFFFFFU) != + zs->seekTable.entries[targetFrame].checksum) { + return ERROR(corruption_detected); + } + + if (zs->decompressedOffset < offset + len) { + /* go back to the start and force a reset of the stream */ + targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset); + } + break; + } + + /* read in more data if we're done with this buffer */ + if (zs->in.pos == zs->in.size) { + toRead = MIN(toRead, SEEKABLE_BUFF_SIZE); + CHECK_IO(zs->src.read(zs->src.opaque, zs->inBuff, toRead)); + zs->in.size = toRead; + zs->in.pos = 0; + } + } + } while (zs->decompressedOffset != offset + len); + + return len; +} + +size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex) +{ + if (frameIndex >= zs->seekTable.tableLen) { + return ERROR(frameIndex_tooLarge); + } + + { + size_t const decompressedSize = + zs->seekTable.entries[frameIndex + 1].dOffset - + zs->seekTable.entries[frameIndex].dOffset; + if (dstSize < decompressedSize) { + return ERROR(dstSize_tooSmall); + } + return ZSTD_seekable_decompress( + zs, dst, 
decompressedSize, + zs->seekTable.entries[frameIndex].dOffset); + } +} Property changes on: vendor/zstd/dist/contrib/seekable_format/zstdseek_decompress.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/doc/educational_decoder/Makefile =================================================================== --- vendor/zstd/dist/doc/educational_decoder/Makefile (nonexistent) +++ vendor/zstd/dist/doc/educational_decoder/Makefile (revision 325597) @@ -0,0 +1,34 @@ +HARNESS_FILES=*.c + +MULTITHREAD_LDFLAGS = -pthread +DEBUGFLAGS= -g -DZSTD_DEBUG=1 +CPPFLAGS += -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \ + -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) +CFLAGS ?= -O3 +CFLAGS += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ + -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ + -Wstrict-prototypes -Wundef -Wformat-security \ + -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ + -Wredundant-decls +CFLAGS += $(DEBUGFLAGS) +CFLAGS += $(MOREFLAGS) +FLAGS = $(CPPFLAGS) $(CFLAGS) $(LDFLAGS) $(MULTITHREAD_LDFLAGS) + +harness: $(HARNESS_FILES) + $(CC) $(FLAGS) $^ -o $@ + +clean: + @$(RM) -f harness + @$(RM) -rf harness.dSYM + +test: harness + @zstd README.md -o tmp.zst + @./harness tmp.zst tmp + @diff -s tmp README.md + @$(RM) -f tmp* + @zstd --train harness.c zstd_decompress.c zstd_decompress.h README.md + @zstd -D dictionary README.md -o tmp.zst + @./harness tmp.zst tmp dictionary + @diff -s tmp README.md + @$(RM) -f tmp* dictionary + @make clean Property changes on: vendor/zstd/dist/doc/educational_decoder/Makefile ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/doc/educational_decoder/harness.c =================================================================== --- vendor/zstd/dist/doc/educational_decoder/harness.c (revision 325596) +++ vendor/zstd/dist/doc/educational_decoder/harness.c (revision 325597) @@ -1,125 +1,125 @@ /* * Copyright (c) 2017-present, Facebook, Inc. * All rights reserved. * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
 */

#include <stdio.h>
#include <stdlib.h>

#include "zstd_decompress.h"

typedef unsigned char u8;

// If the data doesn't have decompressed size with it, fallback on assuming the
// compression ratio is at most 16
#define MAX_COMPRESSION_RATIO (16)

// Protect against allocating too much memory for output
#define MAX_OUTPUT_SIZE ((size_t)1024 * 1024 * 1024)

u8 *input;
u8 *output;
u8 *dict;

size_t read_file(const char *path, u8 **ptr)
{
    FILE *f = fopen(path, "rb");
    if (!f) {
        fprintf(stderr, "failed to open file %s\n", path);
        exit(1);
    }

    fseek(f, 0L, SEEK_END);
    size_t size = ftell(f);
    rewind(f);

    *ptr = malloc(size);
    if (!*ptr) {
        fprintf(stderr, "failed to allocate memory to hold %s\n", path);
        exit(1);
    }

    size_t pos = 0;
    while (!feof(f)) {
        size_t read = fread(&(*ptr)[pos], 1, size - pos, f);
        if (ferror(f)) {
            fprintf(stderr, "error while reading file %s\n", path);
            exit(1);
        }
        if (read == 0) {
            break;
        }
        pos += read;
    }

    fclose(f);

    return pos;
}

void write_file(const char *path, const u8 *ptr, size_t size)
{
    FILE *f = fopen(path, "wb");
    if (!f) {
        fprintf(stderr, "failed to open file %s\n", path);
        exit(1);
    }

    size_t written = 0;
    while (written < size) {
        written += fwrite(&ptr[written], 1, size - written, f);
        if (ferror(f)) {
            fprintf(stderr, "error while writing file %s\n", path);
            exit(1);
        }
    }

    fclose(f);
}

int main(int argc, char **argv)
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <file.zst> <out_path> [dictionary]\n",
                argv[0]);

        return 1;
    }

    size_t input_size = read_file(argv[1], &input);
    size_t dict_size = 0;
    if (argc >= 4) {
        dict_size = read_file(argv[3], &dict);
    }

    size_t decompressed_size = ZSTD_get_decompressed_size(input, input_size);
    if (decompressed_size == (size_t)-1) {
        decompressed_size = MAX_COMPRESSION_RATIO * input_size;
        fprintf(stderr, "WARNING: Compressed data does not contain "
                        "decompressed size, going to assume the compression "
                        "ratio is at most %d (decompressed size of at most "
                        "%zu)\n",
                MAX_COMPRESSION_RATIO, decompressed_size);
    }
    if (decompressed_size > MAX_OUTPUT_SIZE) {
        fprintf(stderr,
                "Required output size too large for this implementation\n");
        return 1;
    }
    output = malloc(decompressed_size);
    if (!output) {
        fprintf(stderr, "failed to allocate memory\n");
        return 1;
    }

    dictionary_t* const parsed_dict = create_dictionary();
    if (dict) {
        parse_dictionary(parsed_dict, dict, dict_size);
    }
    size_t decompressed =
        ZSTD_decompress_with_dict(output, decompressed_size,
                                  input, input_size, parsed_dict);

    free_dictionary(parsed_dict);

    write_file(argv[2], output, decompressed);

    free(input);
    free(output);
    free(dict);
    input = output = dict = NULL;

    return 0;
}
Index: vendor/zstd/dist/doc/educational_decoder/zstd_decompress.c
===================================================================
--- vendor/zstd/dist/doc/educational_decoder/zstd_decompress.c (revision 325596)
+++ vendor/zstd/dist/doc/educational_decoder/zstd_decompress.c (revision 325597)
@@ -1,2303 +1,2303 @@
 /*
  * Copyright (c) 2017-present, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 */
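 
 /* Usage sketch (illustrative only): the decoder's public entry points,
  * defined further down, decode a single complete zstd frame from memory:
  *
  *   u8 *compressed;           // a complete zstd frame
  *   size_t compressed_size;   // its exact size, loaded by the caller
  *   u8 out[1 << 20];
  *   size_t const n = ZSTD_decompress(out, sizeof(out),
  *                                    compressed, compressed_size);
  *   // n is the number of decompressed bytes written to `out`
  */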
 
 /// Zstandard educational decoder implementation
 /// See https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md
 
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
 #include "zstd_decompress.h"
 
 /******* UTILITY MACROS AND TYPES *********************************************/
 // Max block decompressed size is 128 KB and literal blocks can't be larger
 // than their block
 #define MAX_LITERALS_SIZE ((size_t)128 * 1024)
 
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 
 /// This decoder calls exit(1) when it encounters an error; a production
 /// library should propagate error codes instead
 #define ERROR(s)                           \
     do {                                   \
         fprintf(stderr, "Error: %s\n", s); \
         exit(1);                           \
     } while (0)
 #define INP_SIZE()                                              \
     ERROR("Input buffer smaller than it should be or input is " \
           "corrupted")
 #define OUT_SIZE() ERROR("Output buffer too small for output")
 #define CORRUPTION() ERROR("Corruption detected while decompressing")
 #define BAD_ALLOC() ERROR("Memory allocation error")
 #define IMPOSSIBLE() ERROR("An impossibility has occurred")
 
 typedef uint8_t u8;
 typedef uint16_t u16;
 typedef uint32_t u32;
 typedef uint64_t u64;
 
 typedef int8_t i8;
 typedef int16_t i16;
 typedef int32_t i32;
 typedef int64_t i64;
 /******* END UTILITY MACROS AND TYPES *****************************************/
 
 /******* IMPLEMENTATION PRIMITIVE PROTOTYPES **********************************/
 /// The implementations for these functions can be found at the bottom of this
 /// file. They implement low-level functionality needed for the higher level
 /// decompression functions.
 
 /*** IO STREAM OPERATIONS *************/
 /// ostream_t/istream_t are used to wrap the pointers/length data passed into
 /// ZSTD_decompress, so that all IO operations are safely bounds checked
 /// They are written/read forward, and reads are treated as little-endian
 /// They should be used opaquely to ensure safety
 typedef struct {
     u8 *ptr;
     size_t len;
 } ostream_t;
 
 typedef struct {
     const u8 *ptr;
     size_t len;
 
     // Input often reads a few bits at a time, so maintain an internal offset
     int bit_offset;
 } istream_t;
 
 /// The following two functions are the only ones that allow the istream to be
 /// non-byte aligned
 
 /// Reads `num` bits from a bitstream, and updates the internal offset
 static inline u64 IO_read_bits(istream_t *const in, const int num_bits);
 /// Backs up the stream by `num` bits so they can be read again
 static inline void IO_rewind_bits(istream_t *const in, const int num_bits);
 /// If the remaining bits in a byte will be unused, advance to the end of the
 /// byte
 static inline void IO_align_stream(istream_t *const in);
 
 /// Write the given byte into the output stream
 static inline void IO_write_byte(ostream_t *const out, u8 symb);
 
 /// Returns the number of bytes left to be read in this stream. The stream must
 /// be byte aligned.
 static inline size_t IO_istream_len(const istream_t *const in);
 
 /// Advances the stream by `len` bytes, and returns a pointer to the chunk that
 /// was skipped. The stream must be byte aligned.
 static inline const u8 *IO_get_read_ptr(istream_t *const in, size_t len);
 /// Advances the stream by `len` bytes, and returns a pointer to the chunk that
 /// was skipped so it can be written to.
 static inline u8 *IO_get_write_ptr(ostream_t *const out, size_t len);
 
 /// Advance the inner state by `len` bytes. The stream must be byte aligned.
 static inline void IO_advance_input(istream_t *const in, size_t len);
 
 /// Returns an `ostream_t` constructed from the given pointer and length.
static inline ostream_t IO_make_ostream(u8 *out, size_t len); /// Returns an `istream_t` constructed from the given pointer and length. static inline istream_t IO_make_istream(const u8 *in, size_t len); /// Returns an `istream_t` with the same base as `in`, and length `len`. /// Then, advance `in` to account for the consumed bytes. /// `in` must be byte aligned. static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len); /*** END IO STREAM OPERATIONS *********/ /*** BITSTREAM OPERATIONS *************/ /// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits, /// and return them interpreted as a little-endian unsigned integer. static inline u64 read_bits_LE(const u8 *src, const int num_bits, const size_t offset); /// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so /// it updates `offset` to `offset - bits`, and then reads `bits` bits from /// `src + offset`. If the offset becomes negative, the extra bits at the /// bottom are filled in with `0` bits instead of reading from before `src`. static inline u64 STREAM_read_bits(const u8 *src, const int bits, i64 *const offset); /*** END BITSTREAM OPERATIONS *********/ /*** BIT COUNTING OPERATIONS **********/ /// Returns the index of the highest set bit in `num`, or `-1` if `num == 0` static inline int highest_set_bit(const u64 num); /*** END BIT COUNTING OPERATIONS ******/ /*** HUFFMAN PRIMITIVES ***************/ // Table decode method uses exponential memory, so we need to limit depth #define HUF_MAX_BITS (16) // Limit the maximum number of symbols to 256 so we can store a symbol in a byte #define HUF_MAX_SYMBS (256) /// Structure containing all tables necessary for efficient Huffman decoding typedef struct { u8 *symbols; u8 *num_bits; int max_bits; } HUF_dtable; /// Decode a single symbol and read in enough bits to refresh the state static inline u8 HUF_decode_symbol(const HUF_dtable *const dtable, u16 *const state, const u8 *const src, i64 *const offset); /// Read in a full state's worth of bits to initialize it static inline void HUF_init_state(const HUF_dtable *const dtable, u16 *const state, const u8 *const src, i64 *const offset); /// Decompresses a single Huffman stream, returns the number of bytes decoded. /// `src_len` must be the exact length of the Huffman-coded block. static size_t HUF_decompress_1stream(const HUF_dtable *const dtable, ostream_t *const out, istream_t *const in); /// Same as previous but decodes 4 streams, formatted as in the Zstandard /// specification. /// `src_len` must be the exact length of the Huffman-coded block. static size_t HUF_decompress_4stream(const HUF_dtable *const dtable, ostream_t *const out, istream_t *const in); /// Initialize a Huffman decoding table using the table of bit counts provided static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits, const int num_symbs); /// Initialize a Huffman decoding table using the table of weights provided /// Weights follow the definition provided in the Zstandard specification static void HUF_init_dtable_usingweights(HUF_dtable *const table, const u8 *const weights, const int num_symbs); /// Free the malloc'ed parts of a decoding table static void HUF_free_dtable(HUF_dtable *const dtable); /// Deep copy a decoding table, so that it can be used and free'd without /// impacting the source table. 
static void HUF_copy_dtable(HUF_dtable *const dst, const HUF_dtable *const src); /*** END HUFFMAN PRIMITIVES ***********/ /*** FSE PRIMITIVES *******************/ /// For more description of FSE see /// https://github.com/Cyan4973/FiniteStateEntropy/ // FSE table decoding uses exponential memory, so limit the maximum accuracy #define FSE_MAX_ACCURACY_LOG (15) // Limit the maximum number of symbols so they can be stored in a single byte #define FSE_MAX_SYMBS (256) /// The tables needed to decode FSE encoded streams typedef struct { u8 *symbols; u8 *num_bits; u16 *new_state_base; int accuracy_log; } FSE_dtable; /// Return the symbol for the current state static inline u8 FSE_peek_symbol(const FSE_dtable *const dtable, const u16 state); /// Read the number of bits necessary to update state, update, and shift offset /// back to reflect the bits read static inline void FSE_update_state(const FSE_dtable *const dtable, u16 *const state, const u8 *const src, i64 *const offset); /// Combine peek and update: decode a symbol and update the state static inline u8 FSE_decode_symbol(const FSE_dtable *const dtable, u16 *const state, const u8 *const src, i64 *const offset); /// Read bits from the stream to initialize the state and shift offset back static inline void FSE_init_state(const FSE_dtable *const dtable, u16 *const state, const u8 *const src, i64 *const offset); /// Decompress two interleaved bitstreams (e.g. compressed Huffman weights) /// using an FSE decoding table. `src_len` must be the exact length of the /// block. static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable, ostream_t *const out, istream_t *const in); /// Initialize a decoding table using normalized frequencies. static void FSE_init_dtable(FSE_dtable *const dtable, const i16 *const norm_freqs, const int num_symbs, const int accuracy_log); /// Decode an FSE header as defined in the Zstandard format specification and /// use the decoded frequencies to initialize a decoding table. static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in, const int max_accuracy_log); /// Initialize an FSE table that will always return the same symbol and consume /// 0 bits per symbol, to be used for RLE mode in sequence commands static void FSE_init_dtable_rle(FSE_dtable *const dtable, const u8 symb); /// Free the malloc'ed parts of a decoding table static void FSE_free_dtable(FSE_dtable *const dtable); /// Deep copy a decoding table, so that it can be used and free'd without /// impacting the source table. 
static void FSE_copy_dtable(FSE_dtable *const dst, const FSE_dtable *const src); /*** END FSE PRIMITIVES ***************/ /******* END IMPLEMENTATION PRIMITIVE PROTOTYPES ******************************/ /******* ZSTD HELPER STRUCTS AND PROTOTYPES ***********************************/ /// A small structure that can be reused in various places that need to access /// frame header information typedef struct { // The size of window that we need to be able to contiguously store for // references size_t window_size; // The total output size of this compressed frame size_t frame_content_size; // The dictionary id if this frame uses one u32 dictionary_id; // Whether or not the content of this frame has a checksum int content_checksum_flag; // Whether or not the output for this frame is in a single segment int single_segment_flag; } frame_header_t; /// The context needed to decode blocks in a frame typedef struct { frame_header_t header; // The total amount of data available for backreferences, to determine if an // offset too large to be correct size_t current_total_output; const u8 *dict_content; size_t dict_content_len; // Entropy encoding tables so they can be repeated by future blocks instead // of retransmitting HUF_dtable literals_dtable; FSE_dtable ll_dtable; FSE_dtable ml_dtable; FSE_dtable of_dtable; // The last 3 offsets for the special "repeat offsets". u64 previous_offsets[3]; } frame_context_t; /// The decoded contents of a dictionary so that it doesn't have to be repeated /// for each frame that uses it struct dictionary_s { // Entropy tables HUF_dtable literals_dtable; FSE_dtable ll_dtable; FSE_dtable ml_dtable; FSE_dtable of_dtable; // Raw content for backreferences u8 *content; size_t content_size; // Offset history to prepopulate the frame's history u64 previous_offsets[3]; u32 dictionary_id; }; /// A tuple containing the parts necessary to decode and execute a ZSTD sequence /// command typedef struct { u32 literal_length; u32 match_length; u32 offset; } sequence_command_t; /// The decoder works top-down, starting at the high level like Zstd frames, and /// working down to lower more technical levels such as blocks, literals, and /// sequences. The high-level functions roughly follow the outline of the /// format specification: /// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md /// Before the implementation of each high-level function declared here, the /// prototypes for their helper functions are defined and explained /// Decode a single Zstd frame, or error if the input is not a valid frame. /// Accepts a dict argument, which may be NULL indicating no dictionary. 
/// See /// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame-concatenation static void decode_frame(ostream_t *const out, istream_t *const in, const dictionary_t *const dict); // Decode data in a compressed block static void decompress_block(frame_context_t *const ctx, ostream_t *const out, istream_t *const in); // Decode the literals section of a block static size_t decode_literals(frame_context_t *const ctx, istream_t *const in, u8 **const literals); // Decode the sequences part of a block static size_t decode_sequences(frame_context_t *const ctx, istream_t *const in, sequence_command_t **const sequences); // Execute the decoded sequences on the literals block static void execute_sequences(frame_context_t *const ctx, ostream_t *const out, const u8 *const literals, const size_t literals_len, const sequence_command_t *const sequences, const size_t num_sequences); // Copies literals and returns the total literal length that was copied static u32 copy_literals(const size_t seq, istream_t *litstream, ostream_t *const out); // Given an offset code from a sequence command (either an actual offset value // or an index for previous offset), computes the correct offset and udpates // the offset history static size_t compute_offset(sequence_command_t seq, u64 *const offset_hist); // Given an offset, match length, and total output, as well as the frame // context for the dictionary, determines if the dictionary is used and // executes the copy operation static void execute_match_copy(frame_context_t *const ctx, size_t offset, size_t match_length, size_t total_output, ostream_t *const out); /******* END ZSTD HELPER STRUCTS AND PROTOTYPES *******************************/ size_t ZSTD_decompress(void *const dst, const size_t dst_len, const void *const src, const size_t src_len) { dictionary_t* uninit_dict = create_dictionary(); size_t const decomp_size = ZSTD_decompress_with_dict(dst, dst_len, src, src_len, uninit_dict); free_dictionary(uninit_dict); return decomp_size; } size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len, const void *const src, const size_t src_len, dictionary_t* parsed_dict) { istream_t in = IO_make_istream(src, src_len); ostream_t out = IO_make_ostream(dst, dst_len); // "A content compressed by Zstandard is transformed into a Zstandard frame. // Multiple frames can be appended into a single file or stream. A frame is // totally independent, has a defined beginning and end, and a set of // parameters which tells the decoder how to decompress it." /* this decoder assumes decompression of a single frame */ decode_frame(&out, &in, parsed_dict); return out.ptr - (u8 *)dst; } /******* FRAME DECODING ******************************************************/ static void decode_data_frame(ostream_t *const out, istream_t *const in, const dictionary_t *const dict); static void init_frame_context(frame_context_t *const context, istream_t *const in, const dictionary_t *const dict); static void free_frame_context(frame_context_t *const context); static void parse_frame_header(frame_header_t *const header, istream_t *const in); static void frame_context_apply_dict(frame_context_t *const ctx, const dictionary_t *const dict); static void decompress_data(frame_context_t *const ctx, ostream_t *const out, istream_t *const in); static void decode_frame(ostream_t *const out, istream_t *const in, const dictionary_t *const dict) { const u32 magic_number = IO_read_bits(in, 32); // Zstandard frame // // "Magic_Number // // 4 Bytes, little-endian format. 
Value : 0xFD2FB528" if (magic_number == 0xFD2FB528U) { // ZSTD frame decode_data_frame(out, in, dict); return; } // not a real frame or a skippable frame ERROR("Tried to decode non-ZSTD frame"); } /// Decode a frame that contains compressed data. Not all frames do as there /// are skippable frames. /// See /// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#general-structure-of-zstandard-frame-format static void decode_data_frame(ostream_t *const out, istream_t *const in, const dictionary_t *const dict) { frame_context_t ctx; // Initialize the context that needs to be carried from block to block init_frame_context(&ctx, in, dict); if (ctx.header.frame_content_size != 0 && ctx.header.frame_content_size > out->len) { OUT_SIZE(); } decompress_data(&ctx, out, in); free_frame_context(&ctx); } /// Takes the information provided in the header and dictionary, and initializes /// the context for this frame static void init_frame_context(frame_context_t *const context, istream_t *const in, const dictionary_t *const dict) { // Most fields in context are correct when initialized to 0 memset(context, 0, sizeof(frame_context_t)); // Parse data from the frame header parse_frame_header(&context->header, in); // Set up the offset history for the repeat offset commands context->previous_offsets[0] = 1; context->previous_offsets[1] = 4; context->previous_offsets[2] = 8; // Apply details from the dict if it exists frame_context_apply_dict(context, dict); } static void free_frame_context(frame_context_t *const context) { HUF_free_dtable(&context->literals_dtable); FSE_free_dtable(&context->ll_dtable); FSE_free_dtable(&context->ml_dtable); FSE_free_dtable(&context->of_dtable); memset(context, 0, sizeof(frame_context_t)); } static void parse_frame_header(frame_header_t *const header, istream_t *const in) { // "The first header's byte is called the Frame_Header_Descriptor. It tells // which other fields are present. Decoding this byte is enough to tell the // size of Frame_Header. // // Bit number Field name // 7-6 Frame_Content_Size_flag // 5 Single_Segment_flag // 4 Unused_bit // 3 Reserved_bit // 2 Content_Checksum_flag // 1-0 Dictionary_ID_flag" const u8 descriptor = IO_read_bits(in, 8); // decode frame header descriptor into flags const u8 frame_content_size_flag = descriptor >> 6; const u8 single_segment_flag = (descriptor >> 5) & 1; const u8 reserved_bit = (descriptor >> 3) & 1; const u8 content_checksum_flag = (descriptor >> 2) & 1; const u8 dictionary_id_flag = descriptor & 3; if (reserved_bit != 0) { CORRUPTION(); } header->single_segment_flag = single_segment_flag; header->content_checksum_flag = content_checksum_flag; // decode window size if (!single_segment_flag) { // "Provides guarantees on maximum back-reference distance that will be // used within compressed data. This information is important for // decoders to allocate enough memory. 
// // Bit numbers 7-3 2-0 // Field name Exponent Mantissa" u8 window_descriptor = IO_read_bits(in, 8); u8 exponent = window_descriptor >> 3; u8 mantissa = window_descriptor & 7; // Use the algorithm from the specification to compute window size // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor size_t window_base = (size_t)1 << (10 + exponent); size_t window_add = (window_base / 8) * mantissa; header->window_size = window_base + window_add; } // decode dictionary id if it exists if (dictionary_id_flag) { // "This is a variable size field, which contains the ID of the // dictionary required to properly decode the frame. Note that this // field is optional. When it's not present, it's up to the caller to // make sure it uses the correct dictionary. Format is little-endian." const int bytes_array[] = {0, 1, 2, 4}; const int bytes = bytes_array[dictionary_id_flag]; header->dictionary_id = IO_read_bits(in, bytes * 8); } else { header->dictionary_id = 0; } // decode frame content size if it exists if (single_segment_flag || frame_content_size_flag) { // "This is the original (uncompressed) size. This information is // optional. The Field_Size is provided according to value of // Frame_Content_Size_flag. The Field_Size can be equal to 0 (not // present), 1, 2, 4 or 8 bytes. Format is little-endian." // // if frame_content_size_flag == 0 but single_segment_flag is set, we // still have a 1 byte field const int bytes_array[] = {1, 2, 4, 8}; const int bytes = bytes_array[frame_content_size_flag]; header->frame_content_size = IO_read_bits(in, bytes * 8); if (bytes == 2) { // "When Field_Size is 2, the offset of 256 is added." header->frame_content_size += 256; } } else { header->frame_content_size = 0; } if (single_segment_flag) { // "The Window_Descriptor byte is optional. It is absent when // Single_Segment_flag is set. In this case, the maximum back-reference // distance is the content size itself, which can be any value from 1 to // 2^64-1 bytes (16 EB)." 
header->window_size = header->frame_content_size; } } /// A dictionary acts as initializing values for the frame context before /// decompression, so we implement it by applying it's predetermined /// tables and content to the context before beginning decompression static void frame_context_apply_dict(frame_context_t *const ctx, const dictionary_t *const dict) { // If the content pointer is NULL then it must be an empty dict if (!dict || !dict->content) return; // If the requested dictionary_id is non-zero, the correct dictionary must // be present if (ctx->header.dictionary_id != 0 && ctx->header.dictionary_id != dict->dictionary_id) { ERROR("Wrong dictionary provided"); } // Copy the dict content to the context for references during sequence // execution ctx->dict_content = dict->content; ctx->dict_content_len = dict->content_size; // If it's a formatted dict copy the precomputed tables in so they can // be used in the table repeat modes if (dict->dictionary_id != 0) { // Deep copy the entropy tables so they can be freed independently of // the dictionary struct HUF_copy_dtable(&ctx->literals_dtable, &dict->literals_dtable); FSE_copy_dtable(&ctx->ll_dtable, &dict->ll_dtable); FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable); FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable); // Copy the repeated offsets memcpy(ctx->previous_offsets, dict->previous_offsets, sizeof(ctx->previous_offsets)); } } /// Decompress the data from a frame block by block static void decompress_data(frame_context_t *const ctx, ostream_t *const out, istream_t *const in) { // "A frame encapsulates one or multiple blocks. Each block can be // compressed or not, and has a guaranteed maximum content size, which // depends on frame parameters. Unlike frames, each block depends on // previous blocks for proper decoding. However, each block can be // decompressed without waiting for its successor, allowing streaming // operations." int last_block = 0; do { // "Last_Block // // The lowest bit signals if this block is the last one. Frame ends // right after this block. // // Block_Type and Block_Size // // The next 2 bits represent the Block_Type, while the remaining 21 bits // represent the Block_Size. Format is little-endian." last_block = IO_read_bits(in, 1); const int block_type = IO_read_bits(in, 2); const size_t block_len = IO_read_bits(in, 21); switch (block_type) { case 0: { // "Raw_Block - this is an uncompressed block. Block_Size is the // number of bytes to read and copy." const u8 *const read_ptr = IO_get_read_ptr(in, block_len); u8 *const write_ptr = IO_get_write_ptr(out, block_len); // Copy the raw data into the output memcpy(write_ptr, read_ptr, block_len); ctx->current_total_output += block_len; break; } case 1: { // "RLE_Block - this is a single byte, repeated N times. In which // case, Block_Size is the size to regenerate, while the // "compressed" block is just 1 byte (the byte to repeat)." const u8 *const read_ptr = IO_get_read_ptr(in, 1); u8 *const write_ptr = IO_get_write_ptr(out, block_len); // Copy `block_len` copies of `read_ptr[0]` to the output memset(write_ptr, read_ptr[0], block_len); ctx->current_total_output += block_len; break; } case 2: { // "Compressed_Block - this is a Zstandard compressed block, // detailed in another section of this specification. Block_Size is // the compressed size. // Create a sub-stream for the block istream_t block_stream = IO_make_sub_istream(in, block_len); decompress_block(ctx, out, &block_stream); break; } case 3: // "Reserved - this is not a block. 
This value cannot be used with // current version of this specification." CORRUPTION(); break; default: IMPOSSIBLE(); } } while (!last_block); if (ctx->header.content_checksum_flag) { // This program does not support checking the checksum, so skip over it // if it's present IO_advance_input(in, 4); } } /******* END FRAME DECODING ***************************************************/ /******* BLOCK DECOMPRESSION **************************************************/ static void decompress_block(frame_context_t *const ctx, ostream_t *const out, istream_t *const in) { // "A compressed block consists of 2 sections : // // Literals_Section // Sequences_Section" // Part 1: decode the literals block u8 *literals = NULL; const size_t literals_size = decode_literals(ctx, in, &literals); // Part 2: decode the sequences block sequence_command_t *sequences = NULL; const size_t num_sequences = decode_sequences(ctx, in, &sequences); // Part 3: combine literals and sequence commands to generate output execute_sequences(ctx, out, literals, literals_size, sequences, num_sequences); free(literals); free(sequences); } /******* END BLOCK DECOMPRESSION **********************************************/ /******* LITERALS DECODING ****************************************************/ static size_t decode_literals_simple(istream_t *const in, u8 **const literals, const int block_type, const int size_format); static size_t decode_literals_compressed(frame_context_t *const ctx, istream_t *const in, u8 **const literals, const int block_type, const int size_format); static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in); static void fse_decode_hufweights(ostream_t *weights, istream_t *const in, int *const num_symbs); static size_t decode_literals(frame_context_t *const ctx, istream_t *const in, u8 **const literals) { // "Literals can be stored uncompressed or compressed using Huffman prefix // codes. When compressed, an optional tree description can be present, // followed by 1 or 4 streams." // // "Literals_Section_Header // // Header is in charge of describing how literals are packed. It's a // byte-aligned variable-size bitfield, ranging from 1 to 5 bytes, using // little-endian convention." // // "Literals_Block_Type // // This field uses 2 lowest bits of first byte, describing 4 different block // types" // // size_format takes between 1 and 2 bits int block_type = IO_read_bits(in, 2); int size_format = IO_read_bits(in, 2); if (block_type <= 1) { // Raw or RLE literals block return decode_literals_simple(in, literals, block_type, size_format); } else { // Huffman compressed literals return decode_literals_compressed(ctx, in, literals, block_type, size_format); } } /// Decodes literals blocks in raw or RLE form static size_t decode_literals_simple(istream_t *const in, u8 **const literals, const int block_type, const int size_format) { size_t size; switch (size_format) { // These cases are in the form ?0 // In this case, the ? bit is actually part of the size field case 0: case 2: // "Size_Format uses 1 bit. Regenerated_Size uses 5 bits (0-31)." IO_rewind_bits(in, 1); size = IO_read_bits(in, 5); break; case 1: // "Size_Format uses 2 bits. Regenerated_Size uses 12 bits (0-4095)." size = IO_read_bits(in, 12); break; case 3: // "Size_Format uses 2 bits. Regenerated_Size uses 20 bits (0-1048575)." 
size = IO_read_bits(in, 20); break; default: // Size format is in range 0-3 IMPOSSIBLE(); } if (size > MAX_LITERALS_SIZE) { CORRUPTION(); } *literals = malloc(size); if (!*literals) { BAD_ALLOC(); } switch (block_type) { case 0: { // "Raw_Literals_Block - Literals are stored uncompressed." const u8 *const read_ptr = IO_get_read_ptr(in, size); memcpy(*literals, read_ptr, size); break; } case 1: { // "RLE_Literals_Block - Literals consist of a single byte value repeated N times." const u8 *const read_ptr = IO_get_read_ptr(in, 1); memset(*literals, read_ptr[0], size); break; } default: IMPOSSIBLE(); } return size; } /// Decodes Huffman compressed literals static size_t decode_literals_compressed(frame_context_t *const ctx, istream_t *const in, u8 **const literals, const int block_type, const int size_format) { size_t regenerated_size, compressed_size; // Only size_format=0 has 1 stream, so default to 4 int num_streams = 4; switch (size_format) { case 0: // "A single stream. Both Compressed_Size and Regenerated_Size use 10 // bits (0-1023)." num_streams = 1; // Fall through as it has the same size format case 1: // "4 streams. Both Compressed_Size and Regenerated_Size use 10 bits // (0-1023)." regenerated_size = IO_read_bits(in, 10); compressed_size = IO_read_bits(in, 10); break; case 2: // "4 streams. Both Compressed_Size and Regenerated_Size use 14 bits // (0-16383)." regenerated_size = IO_read_bits(in, 14); compressed_size = IO_read_bits(in, 14); break; case 3: // "4 streams. Both Compressed_Size and Regenerated_Size use 18 bits // (0-262143)." regenerated_size = IO_read_bits(in, 18); compressed_size = IO_read_bits(in, 18); break; default: // Impossible IMPOSSIBLE(); } if (regenerated_size > MAX_LITERALS_SIZE || compressed_size >= regenerated_size) { CORRUPTION(); } *literals = malloc(regenerated_size); if (!*literals) { BAD_ALLOC(); } ostream_t lit_stream = IO_make_ostream(*literals, regenerated_size); istream_t huf_stream = IO_make_sub_istream(in, compressed_size); if (block_type == 2) { // Decode the provided Huffman table // "This section is only present when Literals_Block_Type type is // Compressed_Literals_Block (2)." HUF_free_dtable(&ctx->literals_dtable); decode_huf_table(&ctx->literals_dtable, &huf_stream); } else { // If the previous Huffman table is being repeated, ensure it exists if (!ctx->literals_dtable.symbols) { CORRUPTION(); } } size_t symbols_decoded; if (num_streams == 1) { symbols_decoded = HUF_decompress_1stream(&ctx->literals_dtable, &lit_stream, &huf_stream); } else { symbols_decoded = HUF_decompress_4stream(&ctx->literals_dtable, &lit_stream, &huf_stream); } if (symbols_decoded != regenerated_size) { CORRUPTION(); } return regenerated_size; } // Decode the Huffman table description static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in) { // "All literal values from zero (included) to last present one (excluded) // are represented by Weight with values from 0 to Max_Number_of_Bits." // "This is a single byte value (0-255), which describes how to decode the list of weights." const u8 header = IO_read_bits(in, 8); u8 weights[HUF_MAX_SYMBS]; memset(weights, 0, sizeof(weights)); int num_symbs; if (header >= 128) { // "This is a direct representation, where each Weight is written // directly as a 4 bits field (0-15). The full representation occupies // ((Number_of_Symbols+1)/2) bytes, meaning it uses a last full byte // even if Number_of_Symbols is odd. 
Number_of_Symbols = headerByte - // 127" num_symbs = header - 127; const size_t bytes = (num_symbs + 1) / 2; const u8 *const weight_src = IO_get_read_ptr(in, bytes); for (int i = 0; i < num_symbs; i++) { // "They are encoded forward, 2 // weights to a byte with the first weight taking the top four bits // and the second taking the bottom four (e.g. the following // operations could be used to read the weights: Weight[0] = // (Byte[0] >> 4), Weight[1] = (Byte[0] & 0xf), etc.)." if (i % 2 == 0) { weights[i] = weight_src[i / 2] >> 4; } else { weights[i] = weight_src[i / 2] & 0xf; } } } else { // The weights are FSE encoded, decode them before we can construct the // table istream_t fse_stream = IO_make_sub_istream(in, header); ostream_t weight_stream = IO_make_ostream(weights, HUF_MAX_SYMBS); fse_decode_hufweights(&weight_stream, &fse_stream, &num_symbs); } // Construct the table using the decoded weights HUF_init_dtable_usingweights(dtable, weights, num_symbs); } static void fse_decode_hufweights(ostream_t *weights, istream_t *const in, int *const num_symbs) { const int MAX_ACCURACY_LOG = 7; FSE_dtable dtable; // "An FSE bitstream starts by a header, describing probabilities // distribution. It will create a Decoding Table. For a list of Huffman // weights, maximum accuracy is 7 bits." FSE_decode_header(&dtable, in, MAX_ACCURACY_LOG); // Decode the weights *num_symbs = FSE_decompress_interleaved2(&dtable, weights, in); FSE_free_dtable(&dtable); } /******* END LITERALS DECODING ************************************************/ /******* SEQUENCE DECODING ****************************************************/ /// The combination of FSE states needed to decode sequences typedef struct { FSE_dtable ll_table; FSE_dtable of_table; FSE_dtable ml_table; u16 ll_state; u16 of_state; u16 ml_state; } sequence_states_t; /// Different modes to signal to decode_seq_tables what to do typedef enum { seq_literal_length = 0, seq_offset = 1, seq_match_length = 2, } seq_part_t; typedef enum { seq_predefined = 0, seq_rle = 1, seq_fse = 2, seq_repeat = 3, } seq_mode_t; /// The predefined FSE distribution tables for `seq_predefined` mode static const i16 SEQ_LITERAL_LENGTH_DEFAULT_DIST[36] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1}; static const i16 SEQ_OFFSET_DEFAULT_DIST[29] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; static const i16 SEQ_MATCH_LENGTH_DEFAULT_DIST[53] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1}; /// The sequence decoding baseline and number of additional bits to read/add /// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets static const u32 SEQ_LITERAL_LENGTH_BASELINES[36] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65538}; static const u8 SEQ_LITERAL_LENGTH_EXTRA_BITS[36] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; static const u32 SEQ_MATCH_LENGTH_BASELINES[53] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 131, 259, 515, 1027, 2051, 4099, 8195, 16387, 32771, 65539}; 
static const u8 SEQ_MATCH_LENGTH_EXTRA_BITS[53] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0,  0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  1,  1,  1,  1,
    2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

/// Offset decoding is simpler so we just need a maximum code value
/// ((u8)-1 wraps to 0xFF: offset codes have no table-imposed maximum)
static const u8 SEQ_MAX_CODES[3] = {35, (u8)-1, 52};

static void decompress_sequences(frame_context_t *const ctx,
                                 istream_t *const in,
                                 sequence_command_t *const sequences,
                                 const size_t num_sequences);
static sequence_command_t decode_sequence(sequence_states_t *const state,
                                          const u8 *const src,
                                          i64 *const offset);
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
                             const seq_part_t type, const seq_mode_t mode);

static size_t decode_sequences(frame_context_t *const ctx, istream_t *in,
                               sequence_command_t **const sequences) {
    // "A compressed block is a succession of sequences . A sequence is a
    // literal copy command, followed by a match copy command. A literal copy
    // command specifies a length. It is the number of bytes to be copied (or
    // extracted) from the literal section. A match copy command specifies an
    // offset and a length. The offset gives the position to copy from, which
    // can be within a previous block."

    size_t num_sequences;

    // "Number_of_Sequences
    //
    // This is a variable size field using between 1 and 3 bytes. Let's call its
    // first byte byte0."
    u8 header = IO_read_bits(in, 8);
    if (header == 0) {
        // "There are no sequences. The sequence section stops there.
        // Regenerated content is defined entirely by literals section."
        *sequences = NULL;
        return 0;
    } else if (header < 128) {
        // "Number_of_Sequences = byte0 . Uses 1 byte."
        num_sequences = header;
    } else if (header < 255) {
        // "Number_of_Sequences = ((byte0-128) << 8) + byte1 . Uses 2 bytes."
        num_sequences = ((header - 128) << 8) + IO_read_bits(in, 8);
    } else {
        // "Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00 . Uses 3 bytes."
        num_sequences = IO_read_bits(in, 16) + 0x7F00;
    }

    *sequences = malloc(num_sequences * sizeof(sequence_command_t));
    if (!*sequences) {
        BAD_ALLOC();
    }

    decompress_sequences(ctx, in, *sequences, num_sequences);
    return num_sequences;
}

/// Decompress the FSE encoded sequence commands
static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
                                 sequence_command_t *const sequences,
                                 const size_t num_sequences) {
    // "The Sequences_Section regroup all symbols required to decode commands.
    // There are 3 symbol types : literals lengths, offsets and match lengths.
    // They are encoded together, interleaved, in a single bitstream."

    // "Symbol compression modes
    //
    // This is a single byte, defining the compression mode of each symbol
    // type."
    //
    // Bit number : Field name
    // 7-6        : Literals_Lengths_Mode
    // 5-4        : Offsets_Mode
    // 3-2        : Match_Lengths_Mode
    // 1-0        : Reserved
    u8 compression_modes = IO_read_bits(in, 8);

    if ((compression_modes & 3) != 0) {
        // Reserved bits set
        CORRUPTION();
    }

    // "Following the header, up to 3 distribution tables can be described.
    // When present, they are in this order :
    //
    // Literals lengths
    // Offsets
    // Match Lengths"
    // Update the tables we have stored in the context
    decode_seq_table(&ctx->ll_dtable, in, seq_literal_length,
                     (compression_modes >> 6) & 3);

    decode_seq_table(&ctx->of_dtable, in, seq_offset,
                     (compression_modes >> 4) & 3);

    decode_seq_table(&ctx->ml_dtable, in, seq_match_length,
                     (compression_modes >> 2) & 3);

    sequence_states_t states;

    // Initialize the decoding tables
    {
        states.ll_table = ctx->ll_dtable;
        states.of_table = ctx->of_dtable;
        states.ml_table = ctx->ml_dtable;
    }

    const size_t len = IO_istream_len(in);
    const u8 *const src = IO_get_read_ptr(in, len);

    // "After writing the last bit containing information, the compressor writes
    // a single 1-bit and then fills the byte with 0-7 0 bits of padding."
    const int padding = 8 - highest_set_bit(src[len - 1]);
    // The offset starts at the end because FSE streams are read backwards
    i64 bit_offset = len * 8 - padding;

    // "The bitstream starts with initial state values, each using the required
    // number of bits in their respective accuracy, decoded previously from
    // their normalized distribution.
    //
    // It starts by Literals_Length_State, followed by Offset_State, and finally
    // Match_Length_State."
    FSE_init_state(&states.ll_table, &states.ll_state, src, &bit_offset);
    FSE_init_state(&states.of_table, &states.of_state, src, &bit_offset);
    FSE_init_state(&states.ml_table, &states.ml_state, src, &bit_offset);

    for (size_t i = 0; i < num_sequences; i++) {
        // Decode sequences one by one
        sequences[i] = decode_sequence(&states, src, &bit_offset);
    }

    if (bit_offset != 0) {
        CORRUPTION();
    }
}

// Decode a single sequence and update the state
static sequence_command_t decode_sequence(sequence_states_t *const states,
                                          const u8 *const src,
                                          i64 *const offset) {
    // "Each symbol is a code in its own context, which specifies Baseline and
    // Number_of_Bits to add. Codes are FSE compressed, and interleaved with raw
    // additional bits in the same bitstream."

    // Decode symbols, but don't update states
    const u8 of_code = FSE_peek_symbol(&states->of_table, states->of_state);
    const u8 ll_code = FSE_peek_symbol(&states->ll_table, states->ll_state);
    const u8 ml_code = FSE_peek_symbol(&states->ml_table, states->ml_state);

    // Offset doesn't need a max value as it's not decoded using a table
    if (ll_code > SEQ_MAX_CODES[seq_literal_length] ||
        ml_code > SEQ_MAX_CODES[seq_match_length]) {
        CORRUPTION();
    }

    // Read the interleaved bits
    sequence_command_t seq;
    // "Decoding starts by reading the Number_of_Bits required to decode Offset.
    // It then does the same for Match_Length, and then for Literals_Length."
    seq.offset = ((u32)1 << of_code) + STREAM_read_bits(src, of_code, offset);

    seq.match_length =
        SEQ_MATCH_LENGTH_BASELINES[ml_code] +
        STREAM_read_bits(src, SEQ_MATCH_LENGTH_EXTRA_BITS[ml_code], offset);

    seq.literal_length =
        SEQ_LITERAL_LENGTH_BASELINES[ll_code] +
        STREAM_read_bits(src, SEQ_LITERAL_LENGTH_EXTRA_BITS[ll_code], offset);

    // "If it is not the last sequence in the block, the next operation is to
    // update states. Using the rules pre-calculated in the decoding tables,
    // Literals_Length_State is updated, followed by Match_Length_State, and
    // then Offset_State."
    // If the stream is complete don't read bits to update state
    if (*offset != 0) {
        FSE_update_state(&states->ll_table, &states->ll_state, src, offset);
        FSE_update_state(&states->ml_table, &states->ml_state, src, offset);
        FSE_update_state(&states->of_table, &states->of_state, src, offset);
    }

    return seq;
}

/// Given a sequence part and table mode, decode the FSE distribution
/// Errors if the mode is `seq_repeat` without a pre-existing table in `table`
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
                             const seq_part_t type, const seq_mode_t mode) {
    // Constant arrays indexed by seq_part_t
    const i16 *const default_distributions[] = {SEQ_LITERAL_LENGTH_DEFAULT_DIST,
                                                SEQ_OFFSET_DEFAULT_DIST,
                                                SEQ_MATCH_LENGTH_DEFAULT_DIST};
    const size_t default_distribution_lengths[] = {36, 29, 53};
    const size_t default_distribution_accuracies[] = {6, 5, 6};

    const size_t max_accuracies[] = {9, 8, 9};

    if (mode != seq_repeat) {
        // Free old one before overwriting
        FSE_free_dtable(table);
    }

    switch (mode) {
    case seq_predefined: {
        // "Predefined_Mode : uses a predefined distribution table."
        const i16 *distribution = default_distributions[type];
        const size_t symbs = default_distribution_lengths[type];
        const size_t accuracy_log = default_distribution_accuracies[type];

        FSE_init_dtable(table, distribution, symbs, accuracy_log);
        break;
    }
    case seq_rle: {
        // "RLE_Mode : it's a single code, repeated Number_of_Sequences times."
        const u8 symb = IO_get_read_ptr(in, 1)[0];
        FSE_init_dtable_rle(table, symb);
        break;
    }
    case seq_fse: {
        // "FSE_Compressed_Mode : standard FSE compression. A distribution table
        // will be present "
        FSE_decode_header(table, in, max_accuracies[type]);
        break;
    }
    case seq_repeat:
        // "Repeat_Mode : re-use distribution table from previous compressed
        // block."
        // Nothing to do here, table will be unchanged
        if (!table->symbols) {
            // This mode is invalid if we don't already have a table
            CORRUPTION();
        }
        break;
    default:
        // Impossible, as mode is from 0-3
        IMPOSSIBLE();
        break;
    }
}
/******* END SEQUENCE DECODING ************************************************/

/******* SEQUENCE EXECUTION ***************************************************/
static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
                              const u8 *const literals,
                              const size_t literals_len,
                              const sequence_command_t *const sequences,
                              const size_t num_sequences) {
    istream_t litstream = IO_make_istream(literals, literals_len);

    u64 *const offset_hist = ctx->previous_offsets;
    size_t total_output = ctx->current_total_output;

    for (size_t i = 0; i < num_sequences; i++) {
        const sequence_command_t seq = sequences[i];
        {
            const u32 literals_size =
                copy_literals(seq.literal_length, &litstream, out);
            total_output += literals_size;
        }

        size_t const offset = compute_offset(seq, offset_hist);

        size_t const match_length = seq.match_length;

        execute_match_copy(ctx, offset, match_length, total_output, out);

        total_output += match_length;
    }

    // Copy any leftover literals
    {
        size_t len = IO_istream_len(&litstream);
        copy_literals(len, &litstream, out);
        total_output += len;
    }

    ctx->current_total_output = total_output;
}

static u32 copy_literals(const size_t literal_length, istream_t *litstream,
                         ostream_t *const out) {
    // If the sequence asks for more literals than are left, the
    // sequence must be corrupted
    if (literal_length > IO_istream_len(litstream)) {
        CORRUPTION();
    }

    u8 *const write_ptr = IO_get_write_ptr(out, literal_length);
    const u8 *const read_ptr = IO_get_read_ptr(litstream, literal_length);
    // Copy literals to output
    memcpy(write_ptr, read_ptr, literal_length);

    return literal_length;
}

static size_t compute_offset(sequence_command_t seq, u64 *const offset_hist) {
    size_t offset;
    // Offsets are special, we need to handle the repeat offsets
    if (seq.offset <= 3) {
        // "The first 3 values define a repeated offset and we will call
        // them Repeated_Offset1, Repeated_Offset2, and Repeated_Offset3.
        // They are sorted in recency order, with Repeated_Offset1 meaning
        // 'most recent one'".

        // Use 0 indexing for the array
        u32 idx = seq.offset - 1;
        if (seq.literal_length == 0) {
            // "There is an exception though, when current sequence's
            // literals length is 0. In this case, repeated offsets are
            // shifted by one, so Repeated_Offset1 becomes Repeated_Offset2,
            // Repeated_Offset2 becomes Repeated_Offset3, and
            // Repeated_Offset3 becomes Repeated_Offset1 - 1_byte."
            idx++;
        }

        if (idx == 0) {
            offset = offset_hist[0];
        } else {
            // If idx == 3 then literal length was 0 and the offset was 3,
            // as per the exception listed above
            offset = idx < 3 ? offset_hist[idx] : offset_hist[0] - 1;

            // If idx == 1 we don't need to modify offset_hist[2], since
            // we're using the second-most recent code
            if (idx > 1) {
                offset_hist[2] = offset_hist[1];
            }
            offset_hist[1] = offset_hist[0];
            offset_hist[0] = offset;
        }
    } else {
        // When it's not a repeat offset:
        // "if (Offset_Value > 3) offset = Offset_Value - 3;"
        offset = seq.offset - 3;

        // Shift back history
        offset_hist[2] = offset_hist[1];
        offset_hist[1] = offset_hist[0];
        offset_hist[0] = offset;
    }
    return offset;
}

static void execute_match_copy(frame_context_t *const ctx, size_t offset,
                               size_t match_length, size_t total_output,
                               ostream_t *const out) {
    u8 *write_ptr = IO_get_write_ptr(out, match_length);
    if (total_output <= ctx->header.window_size) {
        // In this case offset might go back into the dictionary
        if (offset > total_output + ctx->dict_content_len) {
            // The offset goes beyond even the dictionary
            CORRUPTION();
        }

        if (offset > total_output) {
            // "The rest of the dictionary is its content. The content act
            // as a "past" in front of data to compress or decompress, so it
            // can be referenced in sequence commands."
            const size_t dict_copy =
                MIN(offset - total_output, match_length);
            const size_t dict_offset =
                ctx->dict_content_len - (offset - total_output);

            memcpy(write_ptr, ctx->dict_content + dict_offset, dict_copy);
            write_ptr += dict_copy;
            match_length -= dict_copy;
        }
    } else if (offset > ctx->header.window_size) {
        CORRUPTION();
    }

    // We must copy byte by byte because the match length might be larger
    // than the offset
    // ex: if the output so far was "abc", a command with offset=3 and
    // match_length=6 would produce "abcabcabc" as the new output
    for (size_t j = 0; j < match_length; j++) {
        *write_ptr = *(write_ptr - offset);
        write_ptr++;
    }
}
/******* END SEQUENCE EXECUTION ***********************************************/

/******* OUTPUT SIZE COUNTING *************************************************/
/// Get the decompressed size of an input stream so memory can be allocated in
/// advance.
/// This implementation assumes `src` points to a single ZSTD-compressed frame
size_t ZSTD_get_decompressed_size(const void *src, const size_t src_len) {
    istream_t in = IO_make_istream(src, src_len);

    // get decompressed size from ZSTD frame header
    {
        const u32 magic_number = IO_read_bits(&in, 32);

        if (magic_number == 0xFD2FB528U) {
            // ZSTD frame
            frame_header_t header;
            parse_frame_header(&header, &in);

            if (header.frame_content_size == 0 && !header.single_segment_flag) {
                // Content size not provided, we can't tell
                return -1;
            }

            return header.frame_content_size;
        } else {
            // not a real frame or skippable frame
            ERROR("ZSTD frame magic number did not match");
        }
    }
}
/******* END OUTPUT SIZE COUNTING *********************************************/

/******* DICTIONARY PARSING ***************************************************/
#define DICT_SIZE_ERROR() ERROR("Dictionary size cannot be less than 8 bytes")
#define NULL_SRC() ERROR("Tried to create dictionary with pointer to null src");

dictionary_t* create_dictionary() {
    dictionary_t* dict = calloc(1, sizeof(dictionary_t));
    if (!dict) {
        BAD_ALLOC();
    }
    return dict;
}

static void init_dictionary_content(dictionary_t *const dict,
                                    istream_t *const in);

void parse_dictionary(dictionary_t *const dict, const void *src,
                      size_t src_len) {
    const u8 *byte_src = (const u8 *)src;
    memset(dict, 0, sizeof(dictionary_t));
    if (src == NULL) { /* cannot initialize dictionary with null src */
        NULL_SRC();
    }
    if (src_len < 8) {
        DICT_SIZE_ERROR();
    }

    istream_t in = IO_make_istream(byte_src, src_len);

    const u32 magic_number = IO_read_bits(&in, 32);
    if (magic_number != 0xEC30A437) {
        // raw content dict
        IO_rewind_bits(&in, 32);
        init_dictionary_content(dict, &in);
        return;
    }

    dict->dictionary_id = IO_read_bits(&in, 32);

    // "Entropy_Tables : following the same format as the tables in compressed
    // blocks. They are stored in following order : Huffman tables for literals,
    // FSE table for offsets, FSE table for match lengths, and FSE table for
    // literals lengths. It's finally followed by 3 offset values, populating
    // recent offsets (instead of using {1,4,8}), stored in order, 4-bytes
    // little-endian each, for a total of 12 bytes. Each recent offset must have
    // a value < dictionary size."
    decode_huf_table(&dict->literals_dtable, &in);
    decode_seq_table(&dict->of_dtable, &in, seq_offset, seq_fse);
    decode_seq_table(&dict->ml_dtable, &in, seq_match_length, seq_fse);
    decode_seq_table(&dict->ll_dtable, &in, seq_literal_length, seq_fse);

    // Read in the previous offset history
    dict->previous_offsets[0] = IO_read_bits(&in, 32);
    dict->previous_offsets[1] = IO_read_bits(&in, 32);
    dict->previous_offsets[2] = IO_read_bits(&in, 32);

    // Ensure the provided offsets aren't too large
    // "Each recent offset must have a value < dictionary size."
    for (int i = 0; i < 3; i++) {
        if (dict->previous_offsets[i] > src_len) {
            ERROR("Dictionary corrupted");
        }
    }

    // "Content : The rest of the dictionary is its content. The content act as
    // a "past" in front of data to compress or decompress, so it can be
    // referenced in sequence commands."
    init_dictionary_content(dict, &in);
}

static void init_dictionary_content(dictionary_t *const dict,
                                    istream_t *const in) {
    // Copy in the content
    dict->content_size = IO_istream_len(in);
    dict->content = malloc(dict->content_size);
    if (!dict->content) {
        BAD_ALLOC();
    }

    const u8 *const content = IO_get_read_ptr(in, dict->content_size);

    memcpy(dict->content, content, dict->content_size);
}

/// Free an allocated dictionary
void free_dictionary(dictionary_t *const dict) {
    HUF_free_dtable(&dict->literals_dtable);
    FSE_free_dtable(&dict->ll_dtable);
    FSE_free_dtable(&dict->of_dtable);
    FSE_free_dtable(&dict->ml_dtable);
    free(dict->content);
    memset(dict, 0, sizeof(dictionary_t));
    free(dict);
}
/******* END DICTIONARY PARSING ***********************************************/

/******* IO STREAM OPERATIONS *************************************************/
#define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream")

/// Reads `num` bits from a bitstream, and updates the internal offset
static inline u64 IO_read_bits(istream_t *const in, const int num_bits) {
    if (num_bits > 64 || num_bits <= 0) {
        ERROR("Attempt to read an invalid number of bits");
    }

    const size_t bytes = (num_bits + in->bit_offset + 7) / 8;
    const size_t full_bytes = (num_bits + in->bit_offset) / 8;
    if (bytes > in->len) {
        INP_SIZE();
    }

    const u64 result = read_bits_LE(in->ptr, num_bits, in->bit_offset);

    in->bit_offset = (num_bits + in->bit_offset) % 8;
    in->ptr += full_bytes;
    in->len -= full_bytes;

    return result;
}

/// Rewinds the stream by `num_bits` bits
static inline void IO_rewind_bits(istream_t *const in, int num_bits) {
    if (num_bits < 0) {
        ERROR("Attempting to rewind stream by a negative number of bits");
    }

    // move the offset back by `num_bits` bits
    const int new_offset = in->bit_offset - num_bits;
    // determine the number of whole bytes we have to rewind, rounding up to an
    // integer number (e.g. if `new_offset == -5`, `bytes == 1`)
    const i64 bytes = -(new_offset - 7) / 8;
    in->ptr -= bytes;
    in->len += bytes;
    // make sure the resulting `bit_offset` is positive, as mod in C does not
    // convert numbers from negative to positive (e.g. -22 % 8 == -6)
    in->bit_offset = ((new_offset % 8) + 8) % 8;
}

/// If the remaining bits in a byte will be unused, advance to the end of the
/// byte
static inline void IO_align_stream(istream_t *const in) {
    if (in->bit_offset != 0) {
        if (in->len == 0) {
            INP_SIZE();
        }
        in->ptr++;
        in->len--;
        in->bit_offset = 0;
    }
}

/// Write the given byte into the output stream
static inline void IO_write_byte(ostream_t *const out, u8 symb) {
    if (out->len == 0) {
        OUT_SIZE();
    }

    out->ptr[0] = symb;
    out->ptr++;
    out->len--;
}

/// Returns the number of bytes left to be read in this stream. The stream must
/// be byte aligned.
static inline size_t IO_istream_len(const istream_t *const in) {
    return in->len;
}

/// Returns a pointer where `len` bytes can be read, and advances the internal
/// state. The stream must be byte aligned.
static inline const u8 *IO_get_read_ptr(istream_t *const in, size_t len) {
    if (len > in->len) {
        INP_SIZE();
    }
    if (in->bit_offset != 0) {
        UNALIGNED();
    }
    const u8 *const ptr = in->ptr;
    in->ptr += len;
    in->len -= len;

    return ptr;
}

/// Returns a pointer to write `len` bytes to, and advances the internal state
static inline u8 *IO_get_write_ptr(ostream_t *const out, size_t len) {
    if (len > out->len) {
        OUT_SIZE();
    }
    u8 *const ptr = out->ptr;
    out->ptr += len;
    out->len -= len;

    return ptr;
}

/// Advance the inner state by `len` bytes
static inline void IO_advance_input(istream_t *const in, size_t len) {
    if (len > in->len) {
        INP_SIZE();
    }
    if (in->bit_offset != 0) {
        UNALIGNED();
    }

    in->ptr += len;
    in->len -= len;
}

/// Returns an `ostream_t` constructed from the given pointer and length
static inline ostream_t IO_make_ostream(u8 *out, size_t len) {
    return (ostream_t) { out, len };
}

/// Returns an `istream_t` constructed from the given pointer and length
static inline istream_t IO_make_istream(const u8 *in, size_t len) {
    return (istream_t) { in, len, 0 };
}

/// Returns an `istream_t` with the same base as `in`, and length `len`
/// Then, advance `in` to account for the consumed bytes
/// `in` must be byte aligned
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len) {
    // Consume `len` bytes of the parent stream
    const u8 *const ptr = IO_get_read_ptr(in, len);

    // Make a substream using the pointer to those `len` bytes
    return IO_make_istream(ptr, len);
}
/******* END IO STREAM OPERATIONS *********************************************/

/******* BITSTREAM OPERATIONS *************************************************/
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits
static inline u64 read_bits_LE(const u8 *src, const int num_bits,
                               const size_t offset) {
    if (num_bits > 64) {
        ERROR("Attempt to read an invalid number of bits");
    }

    // Skip over bytes that aren't in range
    src += offset / 8;
    size_t bit_offset = offset % 8;
    u64 res = 0;

    int shift = 0;
    int left = num_bits;
    while (left > 0) {
        u64 mask = left >= 8 ? 0xff : (((u64)1 << left) - 1);
        // Read the next byte, shift it to account for the offset, and then mask
        // out the top part if we don't need all the bits
        res += (((u64)*src++ >> bit_offset) & mask) << shift;
        shift += 8 - bit_offset;
        left -= 8 - bit_offset;
        bit_offset = 0;
    }

    return res;
}

/// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so
/// it updates `offset` to `offset - bits`, and then reads `bits` bits from
/// `src + offset`. If the offset becomes negative, the extra bits at the
/// bottom are filled in with `0` bits instead of reading from before `src`.
static inline u64 STREAM_read_bits(const u8 *const src, const int bits,
                                   i64 *const offset) {
    *offset = *offset - bits;
    size_t actual_off = *offset;
    size_t actual_bits = bits;
    // Don't actually read bits from before the start of src, so if `*offset <
    // 0` fix actual_off and actual_bits to reflect the quantity to read
    if (*offset < 0) {
        actual_bits += *offset;
        actual_off = 0;
    }
    u64 res = read_bits_LE(src, actual_bits, actual_off);

    if (*offset < 0) {
        // Fill in the bottom "overflowed" bits with 0's
        res = -*offset >= 64 ? 0 : (res << -*offset);
    }
    return res;
}
/******* END BITSTREAM OPERATIONS *********************************************/

/******* BIT COUNTING OPERATIONS **********************************************/
/// Returns `x`, where `2^x` is the largest power of 2 less than or equal to
/// `num`, or `-1` if `num == 0`.
static inline int highest_set_bit(const u64 num) {
    for (int i = 63; i >= 0; i--) {
        if (((u64)1 << i) <= num) {
            return i;
        }
    }
    return -1;
}
/******* END BIT COUNTING OPERATIONS ******************************************/

/******* HUFFMAN PRIMITIVES ***************************************************/
static inline u8 HUF_decode_symbol(const HUF_dtable *const dtable,
                                   u16 *const state, const u8 *const src,
                                   i64 *const offset) {
    // Look up the symbol and number of bits to read
    const u8 symb = dtable->symbols[*state];
    const u8 bits = dtable->num_bits[*state];
    const u16 rest = STREAM_read_bits(src, bits, offset);
    // Shift `bits` bits out of the state, keeping the low order bits that
    // weren't necessary to determine this symbol. Then add in the new bits
    // read from the stream.
    *state = ((*state << bits) + rest) & (((u16)1 << dtable->max_bits) - 1);

    return symb;
}

static inline void HUF_init_state(const HUF_dtable *const dtable,
                                  u16 *const state, const u8 *const src,
                                  i64 *const offset) {
    // Read in a full `dtable->max_bits` bits to initialize the state
    const u8 bits = dtable->max_bits;
    *state = STREAM_read_bits(src, bits, offset);
}

static size_t HUF_decompress_1stream(const HUF_dtable *const dtable,
                                     ostream_t *const out,
                                     istream_t *const in) {
    const size_t len = IO_istream_len(in);
    if (len == 0) {
        INP_SIZE();
    }
    const u8 *const src = IO_get_read_ptr(in, len);

    // "Each bitstream must be read backward, that is starting from the end down
    // to the beginning. Therefore it's necessary to know the size of each
    // bitstream.
    //
    // It's also necessary to know exactly which bit is the latest. This is
    // detected by a final bit flag : the highest bit of latest byte is a
    // final-bit-flag. Consequently, a last byte of 0 is not possible. And the
    // final-bit-flag itself is not part of the useful bitstream. Hence, the
    // last byte contains between 0 and 7 useful bits."
    const int padding = 8 - highest_set_bit(src[len - 1]);

    // Offset starts at the end because HUF streams are read backwards
    i64 bit_offset = len * 8 - padding;
    u16 state;

    HUF_init_state(dtable, &state, src, &bit_offset);

    size_t symbols_written = 0;
    while (bit_offset > -dtable->max_bits) {
        // Iterate over the stream, decoding one symbol at a time
        IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &bit_offset));
        symbols_written++;
    }
    // "The process continues up to reading the required number of symbols per
    // stream. If a bitstream is not entirely and exactly consumed, hence
    // reaching exactly its beginning position with all bits consumed, the
    // decoding process is considered faulty."

    // When all symbols have been decoded, the final state value shouldn't have
    // any data from the stream, so it should have "read" dtable->max_bits from
    // before the start of `src`
    // Therefore `offset`, the edge to start reading new bits at, should be
    // dtable->max_bits before the start of the stream
    if (bit_offset != -dtable->max_bits) {
        CORRUPTION();
    }

    return symbols_written;
}

static size_t HUF_decompress_4stream(const HUF_dtable *const dtable,
                                     ostream_t *const out,
                                     istream_t *const in) {
    // "Compressed size is provided explicitly : in the 4-streams variant,
    // bitstreams are preceded by 3 unsigned little-endian 16-bits values. Each
    // value represents the compressed size of one stream, in order. The last
    // stream size is deducted from total compressed size and from previously
    // decoded stream sizes"
    const size_t csize1 = IO_read_bits(in, 16);
    const size_t csize2 = IO_read_bits(in, 16);
    const size_t csize3 = IO_read_bits(in, 16);

    istream_t in1 = IO_make_sub_istream(in, csize1);
    istream_t in2 = IO_make_sub_istream(in, csize2);
    istream_t in3 = IO_make_sub_istream(in, csize3);
    istream_t in4 = IO_make_sub_istream(in, IO_istream_len(in));

    size_t total_output = 0;
    // Decode each stream independently for simplicity
    // If we wanted to we could decode all 4 at the same time for speed,
    // utilizing more execution units
    total_output += HUF_decompress_1stream(dtable, out, &in1);
    total_output += HUF_decompress_1stream(dtable, out, &in2);
    total_output += HUF_decompress_1stream(dtable, out, &in3);
    total_output += HUF_decompress_1stream(dtable, out, &in4);

    return total_output;
}

/// Initializes a Huffman table using canonical Huffman codes
/// For more explanation on canonical Huffman codes see
/// http://www.cs.uofs.edu/~mccloske/courses/cmps340/huff_canonical_dec2015.html
/// Codes within a level are allocated in symbol order (i.e. smaller symbols get
/// earlier codes)
static void HUF_init_dtable(HUF_dtable *const table, const u8 *const bits,
                            const int num_symbs) {
    memset(table, 0, sizeof(HUF_dtable));
    if (num_symbs > HUF_MAX_SYMBS) {
        ERROR("Too many symbols for Huffman");
    }

    u8 max_bits = 0;
    u16 rank_count[HUF_MAX_BITS + 1];
    memset(rank_count, 0, sizeof(rank_count));

    // Count the number of symbols for each number of bits, and determine the
    // depth of the tree
    for (int i = 0; i < num_symbs; i++) {
        if (bits[i] > HUF_MAX_BITS) {
            ERROR("Huffman table depth too large");
        }
        max_bits = MAX(max_bits, bits[i]);
        rank_count[bits[i]]++;
    }

    const size_t table_size = 1 << max_bits;
    table->max_bits = max_bits;
    table->symbols = malloc(table_size);
    table->num_bits = malloc(table_size);

    if (!table->symbols || !table->num_bits) {
        free(table->symbols);
        free(table->num_bits);
        BAD_ALLOC();
    }

    // "Symbols are sorted by Weight. Within same Weight, symbols keep natural
    // order. Symbols with a Weight of zero are removed. Then, starting from
    // lowest weight, prefix codes are distributed in order."
    u32 rank_idx[HUF_MAX_BITS + 1];
    // Initialize the starting codes for each rank (number of bits)
    rank_idx[max_bits] = 0;
    for (int i = max_bits; i >= 1; i--) {
        rank_idx[i - 1] = rank_idx[i] + rank_count[i] * (1 << (max_bits - i));
        // The entire range takes the same number of bits so we can memset it
        memset(&table->num_bits[rank_idx[i]], i, rank_idx[i - 1] - rank_idx[i]);
    }

    if (rank_idx[0] != table_size) {
        CORRUPTION();
    }

    // Allocate codes and fill in the table
    for (int i = 0; i < num_symbs; i++) {
        if (bits[i] != 0) {
            // Allocate a code for this symbol and set its range in the table
            const u16 code = rank_idx[bits[i]];
            // Since the code doesn't care about the bottom `max_bits - bits[i]`
            // bits of state, it gets a range that spans all possible values of
            // the lower bits
            const u16 len = 1 << (max_bits - bits[i]);
            memset(&table->symbols[code], i, len);
            rank_idx[bits[i]] += len;
        }
    }
}

static void HUF_init_dtable_usingweights(HUF_dtable *const table,
                                         const u8 *const weights,
                                         const int num_symbs) {
    // +1 because the last weight is not transmitted in the header
    if (num_symbs + 1 > HUF_MAX_SYMBS) {
        ERROR("Too many symbols for Huffman");
    }

    u8 bits[HUF_MAX_SYMBS];

    u64 weight_sum = 0;
    for (int i = 0; i < num_symbs; i++) {
        // Weights are in the same range as bit count
        if (weights[i] > HUF_MAX_BITS) {
            CORRUPTION();
        }
        weight_sum += weights[i] > 0 ? (u64)1 << (weights[i] - 1) : 0;
    }

    // Find the first power of 2 larger than the sum
    const int max_bits = highest_set_bit(weight_sum) + 1;
    const u64 left_over = ((u64)1 << max_bits) - weight_sum;
    // If the left over isn't a power of 2, the weights are invalid
    if (left_over & (left_over - 1)) {
        CORRUPTION();
    }

    // left_over is used to find the last weight as it's not transmitted
    // by inverting 2^(weight - 1) we can determine the value of last_weight
    const int last_weight = highest_set_bit(left_over) + 1;

    for (int i = 0; i < num_symbs; i++) {
        // "Number_of_Bits = Weight ? Max_Number_of_Bits + 1 - Weight : 0"
        bits[i] = weights[i] > 0 ? (max_bits + 1 - weights[i]) : 0;
    }
    bits[num_symbs] =
        max_bits + 1 - last_weight; // Last weight is always non-zero

    HUF_init_dtable(table, bits, num_symbs + 1);
}

static void HUF_free_dtable(HUF_dtable *const dtable) {
    free(dtable->symbols);
    free(dtable->num_bits);
    memset(dtable, 0, sizeof(HUF_dtable));
}

static void HUF_copy_dtable(HUF_dtable *const dst,
                            const HUF_dtable *const src) {
    if (src->max_bits == 0) {
        memset(dst, 0, sizeof(HUF_dtable));
        return;
    }

    const size_t size = (size_t)1 << src->max_bits;
    dst->max_bits = src->max_bits;

    dst->symbols = malloc(size);
    dst->num_bits = malloc(size);
    if (!dst->symbols || !dst->num_bits) {
        BAD_ALLOC();
    }

    memcpy(dst->symbols, src->symbols, size);
    memcpy(dst->num_bits, src->num_bits, size);
}
/******* END HUFFMAN PRIMITIVES ***********************************************/

/******* FSE PRIMITIVES *******************************************************/
/// For more description of FSE see
/// https://github.com/Cyan4973/FiniteStateEntropy/

/// Allow a symbol to be decoded without updating state
static inline u8 FSE_peek_symbol(const FSE_dtable *const dtable,
                                 const u16 state) {
    return dtable->symbols[state];
}

/// Consumes bits from the input and uses the current state to determine the
/// next state
static inline void FSE_update_state(const FSE_dtable *const dtable,
                                    u16 *const state, const u8 *const src,
                                    i64 *const offset) {
    const u8 bits = dtable->num_bits[*state];
    const u16 rest = STREAM_read_bits(src, bits, offset);
    *state = dtable->new_state_base[*state] + rest;
}

/// Decodes a single FSE symbol and updates the offset
static inline u8 FSE_decode_symbol(const FSE_dtable *const dtable,
                                   u16 *const state, const u8 *const src,
                                   i64 *const offset) {
    const u8 symb = FSE_peek_symbol(dtable, *state);
    FSE_update_state(dtable, state, src, offset);
    return symb;
}

static inline void FSE_init_state(const FSE_dtable *const dtable,
                                  u16 *const state, const u8 *const src,
                                  i64 *const offset) {
    // Read in a full `accuracy_log` bits to initialize the state
    const u8 bits = dtable->accuracy_log;
    *state = STREAM_read_bits(src, bits, offset);
}

static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable,
                                          ostream_t *const out,
                                          istream_t *const in) {
    const size_t len = IO_istream_len(in);
    if (len == 0) {
        INP_SIZE();
    }
    const u8 *const src = IO_get_read_ptr(in, len);

    // "Each bitstream must be read backward, that is starting from the end down
    // to the beginning. Therefore it's necessary to know the size of each
    // bitstream.
    //
    // It's also necessary to know exactly which bit is the latest. This is
    // detected by a final bit flag : the highest bit of latest byte is a
    // final-bit-flag. Consequently, a last byte of 0 is not possible. And the
    // final-bit-flag itself is not part of the useful bitstream. Hence, the
    // last byte contains between 0 and 7 useful bits."
    const int padding = 8 - highest_set_bit(src[len - 1]);
    i64 offset = len * 8 - padding;

    u16 state1, state2;
    // "The first state (State1) encodes the even indexed symbols, and the
    // second (State2) encodes the odd indexes. State1 is initialized first, and
    // then State2, and they take turns decoding a single symbol and updating
    // their state."
    FSE_init_state(dtable, &state1, src, &offset);
    FSE_init_state(dtable, &state2, src, &offset);

    // Decode until we overflow the stream
    // Since we decode in reverse order, overflowing the stream is offset going
    // negative
    size_t symbols_written = 0;
    while (1) {
        // "The number of symbols to decode is determined by tracking bitStream
        // overflow condition: If updating state after decoding a symbol would
        // require more bits than remain in the stream, it is assumed the extra
        // bits are 0. Then, the symbols for each of the final states are
        // decoded and the process is complete."
        IO_write_byte(out, FSE_decode_symbol(dtable, &state1, src, &offset));
        symbols_written++;
        if (offset < 0) {
            // There's still a symbol to decode in state2
            IO_write_byte(out, FSE_peek_symbol(dtable, state2));
            symbols_written++;
            break;
        }

        IO_write_byte(out, FSE_decode_symbol(dtable, &state2, src, &offset));
        symbols_written++;
        if (offset < 0) {
            // There's still a symbol to decode in state1
            IO_write_byte(out, FSE_peek_symbol(dtable, state1));
            symbols_written++;
            break;
        }
    }

    return symbols_written;
}

static void FSE_init_dtable(FSE_dtable *const dtable,
                            const i16 *const norm_freqs, const int num_symbs,
                            const int accuracy_log) {
    if (accuracy_log > FSE_MAX_ACCURACY_LOG) {
        ERROR("FSE accuracy too large");
    }
    if (num_symbs > FSE_MAX_SYMBS) {
        ERROR("Too many symbols for FSE");
    }

    dtable->accuracy_log = accuracy_log;

    const size_t size = (size_t)1 << accuracy_log;
    dtable->symbols = malloc(size * sizeof(u8));
    dtable->num_bits = malloc(size * sizeof(u8));
    dtable->new_state_base = malloc(size * sizeof(u16));

    if (!dtable->symbols || !dtable->num_bits || !dtable->new_state_base) {
        BAD_ALLOC();
    }

    // Used to determine how many bits need to be read for each state,
    // and where the destination range should start
    // Needs to be u16 because max value is 2 * max number of symbols,
    // which can be larger than a byte can store
    u16 state_desc[FSE_MAX_SYMBS];

    // "Symbols are scanned in their natural order for "less than 1"
    // probabilities. Symbols with this probability are being attributed a
    // single cell, starting from the end of the table. These symbols define a
    // full state reset, reading Accuracy_Log bits."
    int high_threshold = size;
    for (int s = 0; s < num_symbs; s++) {
        // Scan for low probability symbols to put at the top
        if (norm_freqs[s] == -1) {
            dtable->symbols[--high_threshold] = s;
            state_desc[s] = 1;
        }
    }

    // "All remaining symbols are sorted in their natural order. Starting from
    // symbol 0 and table position 0, each symbol gets attributed as many cells
    // as its probability. Cell allocation is spreaded, not linear."
    // Place the rest in the table
    const u16 step = (size >> 1) + (size >> 3) + 3;
    const u16 mask = size - 1;
    u16 pos = 0;
    for (int s = 0; s < num_symbs; s++) {
        if (norm_freqs[s] <= 0) {
            continue;
        }

        state_desc[s] = norm_freqs[s];

        for (int i = 0; i < norm_freqs[s]; i++) {
            // Give `norm_freqs[s]` states to symbol s
            dtable->symbols[pos] = s;
            // "A position is skipped if already occupied, typically by a "less
            // than 1" probability symbol."
            do {
                pos = (pos + step) & mask;
            } while (pos >= high_threshold);
            // Note: no other collision checking is necessary as `step` is
            // coprime to `size`, so the cycle will visit each position exactly
            // once
        }
    }
    if (pos != 0) {
        CORRUPTION();
    }

    // Now we can fill baseline and num bits
    for (size_t i = 0; i < size; i++) {
        u8 symbol = dtable->symbols[i];
        u16 next_state_desc = state_desc[symbol]++;
        // Fills in the table appropriately, next_state_desc increases by symbol
        // over time, decreasing number of bits
        dtable->num_bits[i] =
            (u8)(accuracy_log - highest_set_bit(next_state_desc));
        // Baseline increases until the bit threshold is passed, at which point
        // it resets to 0
        dtable->new_state_base[i] =
            ((u16)next_state_desc << dtable->num_bits[i]) - size;
    }
}

/// Decode an FSE header as defined in the Zstandard format specification and
/// use the decoded frequencies to initialize a decoding table.
static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in,
                              const int max_accuracy_log) {
    // "An FSE distribution table describes the probabilities of all symbols
    // from 0 to the last present one (included) on a normalized scale of
    // 1 << Accuracy_Log .
    //
    // It's a bitstream which is read forward, in little-endian fashion. It's
    // not necessary to know its exact size, since it will be discovered and
    // reported by the decoding process."
    if (max_accuracy_log > FSE_MAX_ACCURACY_LOG) {
        ERROR("FSE accuracy too large");
    }

    // "The bitstream starts by reporting on which scale it operates.
    // Accuracy_Log = low4bits + 5. Note that maximum Accuracy_Log for literal
    // and match lengths is 9, and for offsets is 8. Higher values are
    // considered errors."
    const int accuracy_log = 5 + IO_read_bits(in, 4);
    if (accuracy_log > max_accuracy_log) {
        ERROR("FSE accuracy too large");
    }

    // "Then follows each symbol value, from 0 to last present one. The number
    // of bits used by each field is variable. It depends on :
    //
    // Remaining probabilities + 1 : example : Presuming an Accuracy_Log of 8,
    // and presuming 100 probabilities points have already been distributed, the
    // decoder may read any value from 0 to 255 - 100 + 1 == 156 (inclusive).
    // Therefore, it must read log2sup(156) == 8 bits.
    //
    // Value decoded : small values use 1 less bit : example : Presuming values
    // from 0 to 156 (inclusive) are possible, 255-156 = 99 values are remaining
    // in an 8-bits field. They are used this way : first 99 values (hence from
    // 0 to 98) use only 7 bits, values from 99 to 156 use 8 bits. "

    i32 remaining = 1 << accuracy_log;
    i16 frequencies[FSE_MAX_SYMBS];

    int symb = 0;
    while (remaining > 0 && symb < FSE_MAX_SYMBS) {
        // Log of the number of possible values we could read
        int bits = highest_set_bit(remaining + 1) + 1;

        u16 val = IO_read_bits(in, bits);

        // Try to mask out the lower bits to see if it qualifies for the "small
        // value" threshold
        const u16 lower_mask = ((u16)1 << (bits - 1)) - 1;
        const u16 threshold = ((u16)1 << bits) - 1 - (remaining + 1);

        if ((val & lower_mask) < threshold) {
            IO_rewind_bits(in, 1);
            val = val & lower_mask;
        } else if (val > lower_mask) {
            val = val - threshold;
        }

        // "Probability is obtained from Value decoded by following formula :
        // Proba = value - 1"
        const i16 proba = (i16)val - 1;

        // "It means value 0 becomes negative probability -1. -1 is a special
        // probability, which means "less than 1". Its effect on distribution
        // table is described in next paragraph. For the purpose of calculating
        // cumulated distribution, it counts as one."
        remaining -= proba < 0 ? -proba : proba;

        frequencies[symb] = proba;
        symb++;

        // "When a symbol has a probability of zero, it is followed by a 2-bits
        // repeat flag. This repeat flag tells how many probabilities of zeroes
        // follow the current one. It provides a number ranging from 0 to 3. If
        // it is a 3, another 2-bits repeat flag follows, and so on."
        if (proba == 0) {
            // Read the next two bits to see how many more 0s
            int repeat = IO_read_bits(in, 2);

            while (1) {
                for (int i = 0; i < repeat && symb < FSE_MAX_SYMBS; i++) {
                    frequencies[symb++] = 0;
                }
                if (repeat == 3) {
                    repeat = IO_read_bits(in, 2);
                } else {
                    break;
                }
            }
        }
    }
    IO_align_stream(in);

    // "When last symbol reaches cumulated total of 1 << Accuracy_Log, decoding
    // is complete. If the last symbol makes cumulated total go above 1 <<
    // Accuracy_Log, distribution is considered corrupted."
    if (remaining != 0 || symb >= FSE_MAX_SYMBS) {
        CORRUPTION();
    }

    // Initialize the decoding table using the determined weights
    FSE_init_dtable(dtable, frequencies, symb, accuracy_log);
}

static void FSE_init_dtable_rle(FSE_dtable *const dtable, const u8 symb) {
    dtable->symbols = malloc(sizeof(u8));
    dtable->num_bits = malloc(sizeof(u8));
    dtable->new_state_base = malloc(sizeof(u16));

    if (!dtable->symbols || !dtable->num_bits || !dtable->new_state_base) {
        BAD_ALLOC();
    }

    // This setup will always have a state of 0, always return symbol `symb`,
    // and never consume any bits
    dtable->symbols[0] = symb;
    dtable->num_bits[0] = 0;
    dtable->new_state_base[0] = 0;
    dtable->accuracy_log = 0;
}

static void FSE_free_dtable(FSE_dtable *const dtable) {
    free(dtable->symbols);
    free(dtable->num_bits);
    free(dtable->new_state_base);
    memset(dtable, 0, sizeof(FSE_dtable));
}

static void FSE_copy_dtable(FSE_dtable *const dst,
                            const FSE_dtable *const src) {
    if (src->accuracy_log == 0) {
        memset(dst, 0, sizeof(FSE_dtable));
        return;
    }

    size_t size = (size_t)1 << src->accuracy_log;
    dst->accuracy_log = src->accuracy_log;

    dst->symbols = malloc(size);
    dst->num_bits = malloc(size);
    dst->new_state_base = malloc(size * sizeof(u16));
    if (!dst->symbols || !dst->num_bits || !dst->new_state_base) {
        BAD_ALLOC();
    }

    memcpy(dst->symbols, src->symbols, size);
    memcpy(dst->num_bits, src->num_bits, size);
    memcpy(dst->new_state_base, src->new_state_base, size * sizeof(u16));
}
/******* END FSE PRIMITIVES ***************************************************/
Index: vendor/zstd/dist/doc/educational_decoder/zstd_decompress.h
===================================================================
--- vendor/zstd/dist/doc/educational_decoder/zstd_decompress.h (revision 325596)
+++ vendor/zstd/dist/doc/educational_decoder/zstd_decompress.h (revision 325597)
@@ -1,58 +1,58 @@
 /*
- * Copyright (c) 2017-present, Facebook, Inc.
+ * Copyright (c) 2016-present, Facebook, Inc.
  * All rights reserved.
  *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 */

/******* EXPOSED TYPES ********************************************************/
/*
 * Contains the parsed contents of a dictionary
 * This includes Huffman and FSE tables used for decoding and data on offsets
 */
typedef struct dictionary_s dictionary_t;
/******* END EXPOSED TYPES ****************************************************/

/******* DECOMPRESSION FUNCTIONS **********************************************/
/// Zstandard decompression functions.
/// `dst` must point to a space at least as large as the reconstructed output.
size_t ZSTD_decompress(void *const dst, const size_t dst_len,
                       const void *const src, const size_t src_len);

/// If `dict != NULL` and `dict_len >= 8`, does the same thing as
/// `ZSTD_decompress` but uses the provided dict
size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
                                 const void *const src, const size_t src_len,
                                 dictionary_t* parsed_dict);

/// Get the decompressed size of an input stream so memory can be allocated in
/// advance
/// Returns -1 if the size can't be determined
/// Assumes decompression of a single frame
size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len);
/******* END DECOMPRESSION FUNCTIONS ******************************************/

/******* DICTIONARY MANAGEMENT ***********************************************/
/*
 * Return a valid dictionary_t pointer for use with dictionary initialization
 * or decompression
 */
dictionary_t* create_dictionary();

/*
 * Parse a provided dictionary blob for use in decompression
 * `src` -- must point to memory space representing the dictionary
 * `src_len` -- must provide the dictionary size
 * `dict` -- will contain the parsed contents of the dictionary and
 *        can be used for decompression
 */
void parse_dictionary(dictionary_t *const dict, const void *src,
                      size_t src_len);

/*
 * Free internal Huffman tables, FSE tables, and dictionary content
 */
void free_dictionary(dictionary_t *const dict);
/******* END DICTIONARY MANAGEMENT *******************************************/
Index: vendor/zstd/dist/doc/zstd_manual.html
===================================================================
--- vendor/zstd/dist/doc/zstd_manual.html (revision 325596)
+++ vendor/zstd/dist/doc/zstd_manual.html (revision 325597)
@@ -1,983 +1,1200 @@

zstd 1.3.2 Manual

Contents

  1. Introduction
  2. Version
  3. Simple API
  4. Explicit memory management
  5. Simple dictionary API
  6. Bulk processing dictionary API
  7. Streaming
  8. Streaming compression - HowTo
  9. Streaming decompression - HowTo
  10. START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
  11. Advanced types
  12. Frame size functions
  13. Context memory usage
  14. Advanced compression functions
  15. Advanced decompression functions
  16. Advanced streaming functions
  17. Buffer-less and synchronous inner streaming functions
  18. Buffer-less streaming compression (synchronous mode)
  19. Buffer-less streaming decompression (synchronous mode)
  20. New advanced API (experimental)
  21. Block level API

Introduction

   zstd, short for Zstandard, is a fast lossless compression algorithm,
   targeting real-time compression scenarios at zlib-level and better compression ratios.
   The zstd compression library provides in-memory compression and decompression functions.
   The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
   Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
   Compression can be done in:
     - a single step (described as Simple API)
     - a single step, reusing a context (described as Explicit memory management)
     - unbounded multiple steps (described as Streaming compression)
   The compression ratio achievable on small data can be highly improved using a dictionary in:
     - a single step (described as Simple dictionary API)
     - a single step, reusing a dictionary (described as Fast dictionary API)
 
   Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
   Advanced experimental APIs shall never be used with a dynamic library.
   They are not "stable", their definition may change in the future. Only static linking is allowed.
 

Version


 
 
unsigned ZSTD_versionNumber(void);   /**< useful to check dll version */
 

Simple API


 
 
size_t ZSTD_compress( void* dst, size_t dstCapacity,
                 const void* src, size_t srcSize,
                       int compressionLevel);
 

Compresses `src` content as a single zstd compressed frame into already allocated `dst`. Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. @return : compressed size written into `dst` (<= `dstCapacity`), or an error code if it fails (which can be tested using ZSTD_isError()).


size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                   const void* src, size_t compressedSize);
 

`compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. `dstCapacity` is an upper bound of originalSize to regenerate. If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), or an errorCode if it fails (which can be tested using ZSTD_isError()).
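As a sketch, a single-step round trip (names like `input`/`input_size` are illustrative, and error paths are shortened):

    size_t const bound = ZSTD_compressBound(input_size);
    void* const cbuf = malloc(bound);
    size_t const csize = ZSTD_compress(cbuf, bound, input, input_size, 1);
    if (ZSTD_isError(csize)) { /* see ZSTD_getErrorName(csize) */ }

    void* const rbuf = malloc(input_size);  /* caller already knows the original size here */
    size_t const rsize = ZSTD_decompress(rbuf, input_size, cbuf, csize);
    if (ZSTD_isError(rsize)) { /* see ZSTD_getErrorName(rsize) */ }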


#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
 #define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
 unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
 

`src` should point to the start of a ZSTD encoded frame.
`srcSize` must be at least as large as the frame header.
hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
@return : - decompressed size of the frame in `src`, if known
          - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
          - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
 note 1 : a 0 return value means the frame is valid but "empty".
 note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
          When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
          In which case, it's necessary to use streaming mode to decompress data.
          Optionally, application can rely on some implicit limit,
          as ZSTD_decompress() only needs an upper bound of decompressed size.
          (For example, data could be necessarily cut into blocks <= 16 KB).
 note 3 : decompressed size is always present when compression is done with ZSTD_compress()
 note 4 : decompressed size can be very large (64-bits value),
          potentially larger than what local system can handle as a single memory segment.
          In which case, it's necessary to use streaming mode to decompress data.
 note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
          Always ensure return value fits within application's authorized limits.
          Each application can set its own limits.
 note 6 : This function replaces ZSTD_getDecompressedSize()
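Following note 5, a checked-allocation sketch (`MAX_ALLOWED` is a hypothetical application-defined limit):

    unsigned long long const rsize = ZSTD_getFrameContentSize(src, src_size);
    if (rsize == ZSTD_CONTENTSIZE_ERROR) { /* not a valid zstd frame */ }
    else if (rsize == ZSTD_CONTENTSIZE_UNKNOWN) { /* fall back to streaming decompression */ }
    else if (rsize > MAX_ALLOWED) { /* refuse: untrusted size exceeds the application's limit */ }
    else {
        void* const dst = malloc((size_t)rsize);
        /* ... ZSTD_decompress(dst, (size_t)rsize, src, src_size) ... */
    }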


unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 

NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). Both functions work the same way, but ZSTD_getDecompressedSize() blends "empty", "unknown" and "error" results in the same return value (0), while ZSTD_getFrameContentSize() distinguishes them. 'src' is the start of a zstd compressed frame. @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise.



Helper functions

#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < 128 KB) ? ((128 KB - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case scenario */
unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
 

Explicit memory management


 
 

Compression context

  When compressing many times,
   it is recommended to allocate a context just once, and re-use it for each successive compression operation.
   This will make workload friendlier for system's memory.
   Use one context per thread for parallel execution in multi-threaded environments. 
 
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
 ZSTD_CCtx* ZSTD_createCCtx(void);
 size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
 

size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
                          void* dst, size_t dstCapacity,
                    const void* src, size_t srcSize,
                          int compressionLevel);
 

Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()).
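A context re-use sketch over many inputs (`inputs[i]`/`sizes[i]`/`num_inputs` are illustrative):

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    for (size_t i = 0; i < num_inputs; i++) {
        size_t const csize = ZSTD_compressCCtx(cctx, dst, dst_capacity,
                                               inputs[i], sizes[i], 3);
        if (ZSTD_isError(csize)) break;
        /* consume dst[0..csize) before the next iteration */
    }
    ZSTD_freeCCtx(cctx);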


Decompression context

  When decompressing many times,
   it is recommended to allocate a context only once,
   and re-use it for each successive decompression operation.
   This will make workload friendlier for system's memory.
   Use one context per thread for parallel execution. 
 
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
 ZSTD_DCtx* ZSTD_createDCtx(void);
 size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
 

size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize);
 

Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx())


Simple dictionary API


 
 
size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dict,size_t dictSize,
                                int compressionLevel);
 

Compression using a predefined Dictionary (see dictBuilder/zdict.h). Note : This function loads the dictionary, resulting in significant startup delay. Note : When `dict == NULL || dictSize < 8` no dictionary is used.


size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
                                  void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize,
                            const void* dict,size_t dictSize);
 

Decompression using a predefined Dictionary (see dictBuilder/zdict.h). Dictionary must be identical to the one used during compression. Note : This function loads the dictionary, resulting in significant startup delay. Note : When `dict == NULL || dictSize < 8` no dictionary is used.
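A one-shot dictionary round trip, sketched; `dict_buf`/`dict_size` are assumed to hold a dictionary (e.g. built with zdict), and the identical buffer must be passed on both sides:

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const csize = ZSTD_compress_usingDict(cctx, dst, dst_capacity,
                                                 src, src_size,
                                                 dict_buf, dict_size, 3);
    ZSTD_freeCCtx(cctx);

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t const rsize = ZSTD_decompress_usingDict(dctx, out, out_capacity,
                                                   dst, csize,
                                                   dict_buf, dict_size);
    ZSTD_freeDCtx(dctx);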


Bulk processing dictionary API


 
 
ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                              int compressionLevel);
 

When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict


size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
 

Function frees memory allocated by ZSTD_createCDict().


size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                 void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize,
                           const ZSTD_CDict* cdict);
 

Compression using a digested Dictionary. Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. Note that compression level is decided during dictionary creation. Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)


ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
 

Create a digested dictionary, ready to start decompression operation without startup delay. dictBuffer can be released after DDict creation, as its content is copied inside DDict


size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
 

Function frees memory allocated with ZSTD_createDDict()


size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
                                   void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             const ZSTD_DDict* ddict);
 

Decompression using a digested Dictionary. Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times.
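When one dictionary serves many messages, digesting it once avoids the per-call loading cost; a sketch (a `cctx`/`dctx` pair per thread is assumed to exist already):

    ZSTD_CDict* const cdict = ZSTD_createCDict(dict_buf, dict_size, 3);
    ZSTD_DDict* const ddict = ZSTD_createDDict(dict_buf, dict_size);

    /* per message : */
    size_t const csize = ZSTD_compress_usingCDict(cctx, dst, dst_capacity,
                                                  src, src_size, cdict);
    size_t const rsize = ZSTD_decompress_usingDDict(dctx, out, out_capacity,
                                                    dst, csize, ddict);

    ZSTD_freeCDict(cdict);
    ZSTD_freeDDict(ddict);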


Streaming


 
 
typedef struct ZSTD_inBuffer_s {
   const void* src;    /**< start of input buffer */
   size_t size;        /**< size of input buffer */
   size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
 } ZSTD_inBuffer;
 

typedef struct ZSTD_outBuffer_s {
   void*  dst;         /**< start of output buffer */
   size_t size;        /**< size of output buffer */
   size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
 } ZSTD_outBuffer;
 

Streaming compression - HowTo

   A ZSTD_CStream object is required to track streaming operation.
   Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
   ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
   It is recommended to re-use ZSTD_CStream in situations where many streaming operations will be achieved consecutively,
   since it will play nicer with system's memory, by re-using already allocated memory.
   Use one separate ZSTD_CStream per thread for parallel execution.
 
   Start a new compression by initializing ZSTD_CStream.
   Use ZSTD_initCStream() to start a new compression operation.
   Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
 
   Use ZSTD_compressStream() repetitively to consume input stream.
   The function will automatically update both `pos` fields.
   Note that it may not consume the entire input, in which case `pos < size`,
   and it's up to the caller to present again remaining data.
   @return : a size hint, preferred nb of bytes to use as input for next function call
             or an error code, which can be tested using ZSTD_isError().
             Note 1 : it's just a hint, to help latency a little, any other value will work fine.
             Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
 
   At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
   `output->pos` will be updated.
   Note that some content might still be left within internal buffer if `output->size` is too small.
   @return : nb of bytes still present within internal buffer (0 if it's empty)
             or an error code, which can be tested using ZSTD_isError().
 
   ZSTD_endStream() instructs to finish a frame.
   It will perform a flush and write frame epilogue.
   The epilogue is required for decoders to consider a frame completed.
   ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
   In which case, call again ZSTD_endStream() to complete the flush.
   @return : 0 if frame fully completed and fully flushed,
              or >0 if some data is still present within internal buffer
                   (value is minimum size estimation for remaining data to flush, but it could be more)
             or an error code, which can be tested using ZSTD_isError().
 
  
 
typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
 

ZSTD_CStream management functions

ZSTD_CStream* ZSTD_createCStream(void);
 size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
 

Streaming compression functions

size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
 

size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
 

size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
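A sketch of the loop described in the HowTo above, compressing one frame from `fin` to `fout` (both assumed open `FILE*`; allocation checks omitted):

    size_t const in_cap  = ZSTD_CStreamInSize();
    size_t const out_cap = ZSTD_CStreamOutSize();
    void* const in_buf  = malloc(in_cap);
    void* const out_buf = malloc(out_cap);

    ZSTD_CStream* const zcs = ZSTD_createCStream();
    ZSTD_initCStream(zcs, 3);                      /* compression level 3 */

    size_t read;
    while ((read = fread(in_buf, 1, in_cap, fin)) > 0) {
        ZSTD_inBuffer input = { in_buf, read, 0 };
        while (input.pos < input.size) {           /* not all input may be consumed */
            ZSTD_outBuffer output = { out_buf, out_cap, 0 };
            size_t const hint = ZSTD_compressStream(zcs, &output, &input);
            if (ZSTD_isError(hint)) { /* handle: ZSTD_getErrorName(hint) */ }
            fwrite(out_buf, 1, output.pos, fout);
        }
    }
    size_t remaining;
    do {                                           /* write epilogue; may need several calls */
        ZSTD_outBuffer output = { out_buf, out_cap, 0 };
        remaining = ZSTD_endStream(zcs, &output);
        if (ZSTD_isError(remaining)) break;
        fwrite(out_buf, 1, output.pos, fout);
    } while (remaining > 0);
    ZSTD_freeCStream(zcs);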
 

Streaming decompression - HowTo

   A ZSTD_DStream object is required to track streaming operations.
   Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
   ZSTD_DStream objects can be re-used multiple times.
 
   Use ZSTD_initDStream() to start a new decompression operation,
    or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
    @return : recommended first input size
 
   Use ZSTD_decompressStream() repetitively to consume your input.
   The function will update both `pos` fields.
   If `input.pos < input.size`, some input remains unconsumed.
   It's up to the caller to present the remaining data again.
   If `output.pos < output.size`, the decoder has flushed everything it could.
   @return : 0 when a frame is completely decoded and fully flushed,
             an error code, which can be tested using ZSTD_isError(),
             any other value > 0, which means there is still some decoding to do to complete current frame.
             The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
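
   For illustration, a minimal sketch of the decompression loop described above (decompress_file() is a hypothetical helper name; error handling is abridged) :

   #include <stdio.h>
   #include <stdlib.h>
   #include <zstd.h>

   static void decompress_file(FILE* fin, FILE* fout)
   {
       size_t const inSize  = ZSTD_DStreamInSize();
       size_t const outSize = ZSTD_DStreamOutSize();
       void* const inBuff  = malloc(inSize);
       void* const outBuff = malloc(outSize);
       ZSTD_DStream* const zds = ZSTD_createDStream();
       size_t const initErr = ZSTD_initDStream(zds);   /* @return is also a recommended first input size */
       if (ZSTD_isError(initErr)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(initErr)); exit(1); }

       size_t readBytes;
       while ((readBytes = fread(inBuff, 1, inSize, fin)) > 0) {
           ZSTD_inBuffer input = { inBuff, readBytes, 0 };
           while (input.pos < input.size) {   /* present remaining input again until fully consumed */
               ZSTD_outBuffer output = { outBuff, outSize, 0 };
               size_t const ret = ZSTD_decompressStream(zds, &output, &input);
               if (ZSTD_isError(ret)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(ret)); exit(1); }
               fwrite(outBuff, 1, output.pos, fout);   /* ret == 0 means a frame is fully decoded and flushed */
           }
       }

       ZSTD_freeDStream(zds);
       free(inBuff);
       free(outBuff);
   }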
  
 
typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
 

ZSTD_DStream management functions

ZSTD_DStream* ZSTD_createDStream(void);
 size_t ZSTD_freeDStream(ZSTD_DStream* zds);
 

Streaming decompression functions

size_t ZSTD_initDStream(ZSTD_DStream* zds);
 size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 

size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
 

size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
 

START OF ADVANCED AND EXPERIMENTAL FUNCTIONS

 The definitions in this section are considered experimental.
  They should never be used with a dynamic library, as prototypes may change in the future.
  They are provided for advanced scenarios.
  Use them only in association with static linking.
  
 

Advanced types


 
 
typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
                ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy;   /* from faster to stronger */
 

typedef struct {
     unsigned windowLog;      /**< largest match distance : larger == more compression, more memory needed during decompression */
     unsigned chainLog;       /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
     unsigned hashLog;        /**< dispatch table : larger == faster, more memory */
     unsigned searchLog;      /**< nb of searches : larger == more compression, slower */
     unsigned searchLength;   /**< match length searched : larger == faster decompression, sometimes less compression */
     unsigned targetLength;   /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
     ZSTD_strategy strategy;
 } ZSTD_compressionParameters;
 

typedef struct {
     unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
     unsigned checksumFlag;    /**< 1: generate a 32-bits checksum at end of frame, for error detection */
     unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
 } ZSTD_frameParameters;
 

typedef struct {
     ZSTD_compressionParameters cParams;
     ZSTD_frameParameters fParams;
 } ZSTD_parameters;
 

Custom memory allocation functions

typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
 typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
 typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
 /* use this constant to defer to stdlib's functions */
 static const ZSTD_customMem ZSTD_defaultCMem = { NULL, NULL, NULL };
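
 For illustration, a minimal sketch of plugging custom allocators into zstd.
 my_alloc(), my_free() and createCCtx_withCustomMem() are hypothetical names; here they are thin wrappers over stdlib, but they could track or pool allocations instead.

   #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem belongs to the experimental section */
   #include <stdlib.h>
   #include <zstd.h>

   static void* my_alloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
   static void  my_free (void* opaque, void* address) { (void)opaque; free(address); }

   static ZSTD_CCtx* createCCtx_withCustomMem(void)
   {
       ZSTD_customMem const cmem = { my_alloc, my_free, NULL };
       return ZSTD_createCCtx_advanced(cmem);   /* declared below, in "Advanced compression functions" */
   }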
 

Frame size functions


 
 
size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
 

`src` should point to the start of a ZSTD encoded frame or skippable frame.
 `srcSize` must be at least as large as the frame.
 @return : the compressed size of the first frame starting at `src`,
           suitable to pass to `ZSTD_decompress` or similar,
           or an error code if input is invalid


unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 

`src` should point to the start of a series of ZSTD encoded and/or skippable frames.
 `srcSize` must be the _exact_ size of this series
       (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`).
 @return : - decompressed size of all data in all successive frames
           - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
           - if an error occurred: ZSTD_CONTENTSIZE_ERROR
  note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
           When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
           In which case, it's necessary to use streaming mode to decompress data.
  note 2 : decompressed size is always present when compression is done with ZSTD_compress().
  note 3 : decompressed size can be very large (64-bits value),
           potentially larger than what local system can handle as a single memory segment.
           In which case, it's necessary to use streaming mode to decompress data.
  note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
           Always ensure result fits within application's authorized limits.
           Each application can set its own limits.
  note 5 : ZSTD_findDecompressedSize() handles multiple frames, and so it must traverse the input to read each contained frame header.
           This is fast as most of the data is skipped, however it does mean that all frame data must be present and valid.
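
 For illustration, a sketch of walking a series of frames with ZSTD_findFrameCompressedSize().
 countFrames() is a hypothetical helper; it assumes `#include <zstd.h>` with ZSTD_STATIC_LINKING_ONLY defined.

   static unsigned countFrames(const void* src, size_t srcSize)
   {
       unsigned nbFrames = 0;
       size_t pos = 0;
       while (pos < srcSize) {
           size_t const frameSize = ZSTD_findFrameCompressedSize((const char*)src + pos, srcSize - pos);
           if (ZSTD_isError(frameSize)) break;   /* invalid or truncated input */
           pos += frameSize;
           nbFrames++;
       }
       return nbFrames;
   }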


size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
 

`src` should point to the start of a ZSTD frame.
 `srcSize` must be >= ZSTD_frameHeaderSize_prefix.
 @return : size of the Frame Header


Context memory usage


 
 
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
 size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
 size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
 size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
 size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
 

These functions give the current memory usage of selected object.
 Object memory usage can evolve when re-used multiple times.


size_t ZSTD_estimateCCtxSize(int compressionLevel);
 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
 size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
 size_t ZSTD_estimateDCtxSize(void);
 

These functions make it possible to estimate memory usage of a future {D,C}Ctx, before its creation.
 ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one.
 It will also consider src size to be arbitrarily "large", which is the worst case.
 If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
 ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from a compressionLevel.
 ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter().
 Only single-threaded compression is supported; this function will return an error code if ZSTD_p_nbThreads is > 1.
 Note : CCtx estimation is only correct for single-threaded compression.


size_t ZSTD_estimateCStreamSize(int compressionLevel);
 size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
 size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
 size_t ZSTD_estimateDStreamSize(size_t windowSize);
 size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
 

ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
 It will also consider src size to be arbitrarily "large", which is the worst case.
 If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
 ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from a compressionLevel.
 ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter().
 Only single-threaded compression is supported; this function will return an error code if ZSTD_p_nbThreads is set to a value > 1.
 Note : CStream estimation is only correct for single-threaded compression.
 ZSTD_DStream memory budget depends on window size.
 This information can be passed manually, using ZSTD_estimateDStreamSize(),
 or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame().
 Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
        an internal ?Dict will be created, whose additional size is not estimated here.
        In this case, get the total size by adding ZSTD_estimate?DictSize().


typedef enum {
    ZSTD_dlm_byCopy = 0,     /**< Copy dictionary content internally */
    ZSTD_dlm_byRef,          /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
} ZSTD_dictLoadMethod_e;

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
 size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
 size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
 

ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
 ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
 Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller.


Advanced compression functions


 
 
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
 

Create a ZSTD compression context using external alloc and free functions


ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
 

workspace: The memory area to emplace the context into.
           Provided pointer must be 8-bytes aligned.
           It must outlive context usage.
 workspaceSize: Use ZSTD_estimateCCtxSize() or ZSTD_estimateCStreamSize()
                to determine how large the workspace must be to support the target scenario.
 @return : pointer to ZSTD_CCtx*, or NULL if error (size too small)
 Note : zstd will never resize nor malloc() when using a static cctx.
        If it needs more memory than available, it will simply error out.
 Note 2 : there is no corresponding "free" function.
          Since workspace was allocated externally, it must be freed externally too.
 Limitation 1 : currently not compatible with internal CDict creation,
                such as ZSTD_CCtx_loadDictionary() or ZSTD_initCStream_usingDict().
 Limitation 2 : currently not compatible with multi-threading
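
 For illustration, a sketch of one-shot compression with a statically allocated context.
 compress_withStaticCCtx() is a hypothetical helper; the workspace is malloc()'ed here for brevity, but any suitably aligned, caller-owned buffer works.

   #define ZSTD_STATIC_LINKING_ONLY
   #include <stdlib.h>
   #include <zstd.h>

   static size_t compress_withStaticCCtx(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize, int level)
   {
       size_t const wkspSize = ZSTD_estimateCCtxSize(level);
       void* const wksp = malloc(wkspSize);   /* malloc() returns suitably aligned memory */
       ZSTD_CCtx* const cctx = (wksp != NULL) ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
       if (cctx == NULL) { free(wksp); return (size_t)-1; }   /* generic failure sentinel; tests true with ZSTD_isError() */
       {   size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
           free(wksp);   /* no dedicated "free" function : the workspace belongs to the caller */
           return cSize; /* possibly an error code, test with ZSTD_isError() */
       }
   }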


ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
 

Create a digested dictionary for compression.
 Dictionary content is simply referenced, and therefore stays in dictBuffer.
 It is important that dictBuffer outlives CDict :
 it must remain read-accessible throughout the lifetime of CDict.


typedef enum { ZSTD_dm_auto=0,        /* dictionary is "full" if it starts with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
                ZSTD_dm_rawContent,    /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
                ZSTD_dm_fullDict       /* refuses to load a dictionary if it does not respect Zstandard's specification */
 } ZSTD_dictMode_e;
 

ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictMode_e dictMode,
                                       ZSTD_compressionParameters cParams,
                                       ZSTD_customMem customMem);
 

Create a ZSTD_CDict using external alloc and free, and customized compression parameters


ZSTD_CDict* ZSTD_initStaticCDict(
                 void* workspace, size_t workspaceSize,
           const void* dict, size_t dictSize,
                ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode,
                 ZSTD_compressionParameters cParams);
 

Generate a digested dictionary in provided memory area.
 workspace: The memory area to emplace the dictionary into.
            Provided pointer must be 8-bytes aligned.
            It must outlive dictionary usage.
 workspaceSize: Use ZSTD_estimateCDictSize() to determine how large workspace must be.
 cParams : use ZSTD_getCParams() to transform a compression level into its relevant cParams.
 @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 Note : there is no corresponding "free" function.
        Since workspace was allocated externally, it must be freed externally.
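
 For illustration, a sketch of digesting a dictionary into caller-provided memory.
 make_staticCDict() is a hypothetical helper : the caller owns the workspace and frees it once the CDict is no longer in use.

   #define ZSTD_STATIC_LINKING_ONLY
   #include <stdlib.h>
   #include <zstd.h>

   static ZSTD_CDict* make_staticCDict(const void* dict, size_t dictSize, int level,
                                       void** wkspOut)   /* caller frees *wkspOut after the CDict is retired */
   {
       ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 0, dictSize);  /* 0 == srcSize unknown */
       size_t const wkspSize = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
       void* const wksp = malloc(wkspSize);   /* malloc() returns suitably aligned memory */
       if (wksp == NULL) return NULL;
       {   ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
                                                          dict, dictSize,
                                                          ZSTD_dlm_byCopy, ZSTD_dm_auto,
                                                          cParams);
           if (cdict == NULL) { free(wksp); return NULL; }   /* workspace too small */
           *wkspOut = wksp;
           return cdict;
       }
   }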


ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 

@return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. `estimatedSrcSize` value is optional, select 0 if not known


ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
 

same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. All fields of `ZSTD_frameParameters` are set to default (0)


size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
 

Ensure param values remain within authorized range


ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
 

Optimize params for a given `srcSize` and `dictSize`.
 Both values are optional : select `0` if unknown.


size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
                       void* dst, size_t dstCapacity,
                 const void* src, size_t srcSize,
                 const void* dict,size_t dictSize,
                       ZSTD_parameters params);
 

Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter


size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                       void* dst, size_t dstCapacity,
                 const void* src, size_t srcSize,
                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
 

Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters


Advanced decompression functions


 
 
unsigned ZSTD_isFrame(const void* buffer, size_t size);
 

Tells if the content of `buffer` starts with a valid Frame Identifier.
 Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
 Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
 Note 3 : Skippable Frame Identifiers are considered valid.


ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
 

Create a ZSTD decompression context using external alloc and free functions


ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
 

workspace: The memory area to emplace the context into.
           Provided pointer must be 8-bytes aligned.
           It must outlive context usage.
 workspaceSize: Use ZSTD_estimateDCtxSize() or ZSTD_estimateDStreamSize()
                to determine how large the workspace must be to support the target scenario.
 @return : pointer to ZSTD_DCtx*, or NULL if error (size too small)
 Note : zstd will never resize nor malloc() when using a static dctx.
        If it needs more memory than available, it will simply error out.
 Note 2 : static dctx is incompatible with legacy support
 Note 3 : there is no corresponding "free" function.
          Since workspace was allocated externally, it must be freed externally.
 Limitation : currently not compatible with internal DDict creation,
              such as ZSTD_initDStream_usingDict().


ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
 

Create a digested dictionary, ready to start decompression operation without startup delay.
 Dictionary content is referenced, and therefore stays in dictBuffer.
 It is important that dictBuffer outlives DDict :
 it must remain read-accessible throughout the lifetime of DDict.


ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_customMem customMem);
 

Create a ZSTD_DDict using external alloc and free, optionally by reference


ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize,
                                  const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod);
 

Generate a digested dictionary in provided memory area.
 workspace: The memory area to emplace the dictionary into.
            Provided pointer must be 8-bytes aligned.
            It must outlive dictionary usage.
 workspaceSize: Use ZSTD_estimateDDictSize() to determine how large workspace must be.
 @return : pointer to ZSTD_DDict*, or NULL if error (size too small)
 Note : there is no corresponding "free" function.
        Since workspace was allocated externally, it must be freed externally.


unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
 

Provides the dictID stored within dictionary.
 If @return == 0, the dictionary is not conformant with Zstandard specification.
 It can still be loaded, but as a content-only dictionary.


unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
 

Provides the dictID of the dictionary loaded into `ddict`.
 If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
 Non-conformant dictionaries can still be loaded, but as content-only dictionaries.


unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
 

Provides the dictID required to decompress the frame stored within `src`.
 If @return == 0, the dictID could not be decoded.
 This could be for one of the following reasons :
 - The frame does not require a dictionary to be decoded (most common case).
 - The frame was built with dictID intentionally removed.
   Whatever dictionary is necessary is a hidden information.
   Note : this use case also happens when using a non-conformant dictionary.
 - `srcSize` is too small, and as a result, the frame header could not be decoded
   (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
 - This is not a Zstandard frame.
 When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code.


Advanced streaming functions


 
 

Advanced Streaming compression functions

ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
 ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. */
 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */
 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
 

size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
 

Start a new compression job, using same parameters from previous job.
 This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in-place.
 Note that zcs must be initialized at least once before using ZSTD_resetCStream().
 pledgedSrcSize==0 means "srcSize unknown".
 If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
 @return : 0, or an error code (which can be tested using ZSTD_isError())


Advanced Streaming decompression functions

ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
 ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
 typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
 size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);   /* obsolete : this API will be removed in a future version */
 size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
 size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict is referenced, it must outlive decompression session */
 size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
 

Buffer-less and synchronous inner streaming functions

   This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
   But it's also a complex one, with several restrictions, documented below.
   Prefer the normal streaming API for an easier experience.
  
 

Buffer-less streaming compression (synchronous mode)

   A ZSTD_CCtx object is required to track streaming operations.
   Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
   ZSTD_CCtx object can be re-used multiple times within successive compression operations.
 
   Start by initializing a context.
   Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
   or ZSTD_compressBegin_advanced(), for finer parameter control.
   It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
 
   Then, consume your input using ZSTD_compressContinue().
   There are some important considerations to keep in mind when using this advanced function :
  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
   - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
     Worst case evaluation is provided by ZSTD_compressBound().
    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
   - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
   - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
     In which case, it will "discard" the relevant memory section from its history.
 
   Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
   It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
 
  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
 

Buffer-less streaming compression functions

size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
 size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize=0 means null-size */
 size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize can be 0, indicating unknown size.  if it is non-zero, it must be accurate.  for 0 size frames, use compressBegin_advanced */
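
 For illustration, a sketch of the buffer-less compression sequence, producing one frame from a single memory segment.
 compress_bufferless() is a hypothetical helper; it assumes dstCapacity >= ZSTD_compressBound(srcSize), the worst case discussed above.

   static size_t compress_bufferless(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, int level)
   {
       size_t pos = 0;
       {   size_t const bErr = ZSTD_compressBegin(cctx, level);
           if (ZSTD_isError(bErr)) return bErr;
       }
       /* one ZSTD_compressContinue() call per input segment; a single segment here */
       {   size_t const cSize = ZSTD_compressContinue(cctx, (char*)dst + pos, dstCapacity - pos, src, srcSize);
           if (ZSTD_isError(cSize)) return cSize;
           pos += cSize;
       }
       /* write last block(s) and epilogue; srcSize==0 => a final empty block ends the frame */
       {   size_t const endSize = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL, 0);
           if (ZSTD_isError(endSize)) return endSize;
           pos += endSize;
       }
       return pos;   /* compressed frame size */
   }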
 

Buffer-less streaming decompression (synchronous mode)

   A ZSTD_DCtx object is required to track streaming operations.
   Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
   A ZSTD_DCtx object can be re-used multiple times.
 
   First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
   Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
   Data fragment must be large enough to ensure successful decoding.
  `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
   @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
            >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
            errorCode, which can be tested using ZSTD_isError().
 
  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
  As a consequence, check that values remain within valid application range.
  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
  Each application can set its own limits, depending on local restrictions.
  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.

  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
  ZSTD_decompressContinue() is very sensitive to contiguity :
  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
  or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
  There are multiple ways to guarantee this condition.

  The most memory-efficient way is to use a round buffer of sufficient size.
  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
  which can @return an error code if the required value is too large for current system (in 32-bits mode).
  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
  At which point, decoding can resume from the beginning of the buffer.
  Note that already-decoded data stored in the buffer should be flushed before being overwritten.

  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.

  Finally, if you control the compression process, you can also ignore all buffer size rules,
  as long as the encoder and decoder progress in "lock-step",
  aka use exactly the same buffer sizes, break contiguity at the same place, etc.

  Once buffers are set up, start decompression, with ZSTD_decompressBegin().
   If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
 
   Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
   ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
   ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
 
  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
   It can also be an error code, which can be tested with ZSTD_isError().
 
   A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
   Context can then be reset to start a new decompression.
 
   Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
   This information is not required to properly decode a frame.
 
   == Special case : skippable frames 
 
   Skippable frames allow integration of user-defined data into a flow of concatenated frames.
  Skippable frames will be ignored (skipped) by decompressor.
  The format of skippable frames is as follows :
   a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
   b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
   c) Frame Content - any content (User Data) of length equal to Frame Size
  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
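
  For illustration, a sketch of the decoding loop described above, for the simple case where the whole frame sits in `src` and the whole output fits contiguously in `dst` (so the contiguity rules are trivially satisfied). decompress_bufferless() is a hypothetical helper name.

   static size_t decompress_bufferless(ZSTD_DCtx* dctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
   {
       ZSTD_frameHeader zfh;
       size_t const hErr = ZSTD_getFrameHeader(&zfh, src, srcSize);
       if (hErr != 0) return hErr;   /* error code, or ">0 : provide at least hErr bytes" */
       /* production code must validate zfh.windowSize against application limits here */

       {   size_t const bErr = ZSTD_decompressBegin(dctx);
           if (ZSTD_isError(bErr)) return bErr;
       }
       {   const char* ip = (const char*)src;
           char* op = (char*)dst;
           size_t toRead;
           while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
               size_t const decoded = ZSTD_decompressContinue(dctx,
                                           op, dstCapacity - (size_t)(op - (char*)dst),
                                           ip, toRead);   /* requires exactly toRead bytes */
               if (ZSTD_isError(decoded)) return decoded;
               ip += toRead;      /* assumes the whole frame is present in src */
               op += decoded;     /* decoded can be 0 when a metadata item was parsed */
           }
           return (size_t)(op - (char*)dst);   /* total regenerated size */
       }
   }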
 

Buffer-less streaming decompression functions

typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
 typedef struct {
    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
     unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
    unsigned blockSizeMax;
     ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
     unsigned headerSize;
     unsigned dictID;
     unsigned checksumFlag;
 } ZSTD_frameHeader;
 size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
 size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
 

typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
 


New advanced API (experimental)

 
typedef enum {
    /* Question : should we have a format ZSTD_f_auto ?
     * For the time being, it would mean exactly the same as ZSTD_f_zstd1.
     * But, in the future, should several formats be supported,
     * on the compression side, it would mean "default format".
     * On the decompression side, it would mean "multi format",
     * and ZSTD_f_zstd1 could be reserved to mean "accept *only* zstd frames".
     * Since the meaning is a little different, another option could be to define different enums for compression and decompression.
     * This question could be kept for later, when there are actually multiple formats to support,
     * but there is also the question of pinning enum values, and pinning value `0` is especially important. */
    ZSTD_f_zstd1 = 0,        /* zstd frame format, specified in zstd_compression_format.md (default) */
    ZSTD_f_zstd1_magicless,  /* Variant of zstd frame format, without initial 4-bytes magic number.
                              * Useful to save 4 bytes per generated frame.
                              * Decoder cannot automatically recognise this format, and requires instructions. */
} ZSTD_format_e;

typedef enum {
    /* compression format */
    ZSTD_p_format = 10,      /* See ZSTD_format_e enum definition.
                              * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */

     /* compression parameters */
     ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
                               * Default level is ZSTD_CLEVEL_DEFAULT==3.
                               * Special: value 0 means "do not change cLevel". */
     ZSTD_p_windowLog,        /* Maximum allowed back-reference distance, expressed as power of 2.
                               * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
                              * Special: value 0 means "do not change windowLog".
                              * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
                              * requires setting the maximum window size at least as large during decompression. */
     ZSTD_p_hashLog,          /* Size of the probe table, as a power of 2.
                               * Resulting table size is (1 << (hashLog+2)).
                               * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
                               * Larger tables improve compression ratio of strategies <= dFast,
                               * and improve speed of strategies > dFast.
                               * Special: value 0 means "do not change hashLog". */
     ZSTD_p_chainLog,         /* Size of the full-search table, as a power of 2.
                               * Resulting table size is (1 << (chainLog+2)).
                               * Larger tables result in better and slower compression.
                               * This parameter is useless when using "fast" strategy.
                               * Special: value 0 means "do not change chainLog". */
     ZSTD_p_searchLog,        /* Number of search attempts, as a power of 2.
                               * More attempts result in better and slower compression.
                               * This parameter is useless when using "fast" and "dFast" strategies.
                               * Special: value 0 means "do not change searchLog". */
     ZSTD_p_minMatch,         /* Minimum size of searched matches (note : repCode matches can be smaller).
                               * Larger values make faster compression and decompression, but decrease ratio.
                               * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
                               * Note that currently, for all strategies < btopt, effective minimum is 4.
                               * Note that currently, for all strategies > fast, effective maximum is 6.
                               * Special: value 0 means "do not change minMatchLength". */
     ZSTD_p_targetLength,     /* Only useful for strategies >= btopt.
                               * Length of Match considered "good enough" to stop search.
                               * Larger values make compression stronger and slower.
                               * Special: value 0 means "do not change targetLength". */
     ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
                               * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
                               * The higher the value of selected strategy, the more complex it is,
                               * resulting in stronger and slower compression.
                               * Special: value 0 means "do not change strategy". */
 
     /* frame parameters */
     ZSTD_p_contentSizeFlag=200, /* Content size is written into frame header _whenever known_ (default:1)
                               * note that content size must be known at the beginning,
                               * it is sent using ZSTD_CCtx_setPledgedSrcSize() */
     ZSTD_p_checksumFlag,     /* A 32-bits checksum of content is written at end of frame (default:0) */
     ZSTD_p_dictIDFlag,       /* When applicable, dictID of dictionary is provided in frame header (default:1) */
 
     /* multi-threading parameters */
     ZSTD_p_nbThreads=400,    /* Select how many threads a compression job can spawn (default:1)
                               * More threads improve speed, but also increase memory usage.
                               * Can only receive a value > 1 if ZSTD_MULTITHREAD is enabled.
                               * Special: value 0 means "do not change nbThreads" */
     ZSTD_p_jobSize,          /* Size of a compression job. Each compression job is completed in parallel.
                               * 0 means default, which is dynamically determined based on compression parameters.
                               * Job size must be a minimum of overlapSize, or 1 KB, whichever is largest
                               * The minimum size is automatically and transparently enforced */
     ZSTD_p_overlapSizeLog,   /* Size of previous input reloaded at the beginning of each job.
                               * 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */
 
     /* advanced parameters - may not remain available after API update */
     ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
                               * even when referencing into Dictionary content (default:0) */
    ZSTD_p_enableLongDistanceMatching=1200,  /* Enable long distance matching.
                                         * This parameter is designed to improve the compression
                                         * ratio for large inputs with long distance matches.
                                         * This increases the memory usage as well as the window size.
                                         * Note: setting this parameter sets all the LDM parameters
                                         * as well as ZSTD_p_windowLog. It should be set after
                                         * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and
                                         * other LDM parameters. Setting the compression level
                                         * after this parameter overrides the window log, though LDM
                                         * will remain enabled until explicitly disabled. */
    ZSTD_p_ldmHashLog,   /* Size of the table for long distance matching, as a power of 2.
                          * Larger values increase memory usage and compression ratio, but decrease
                          * compression speed.
                          * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
                          * (default: windowlog - 7). */
    ZSTD_p_ldmMinMatch,  /* Minimum size of searched matches for long distance matcher.
                          * Larger/too small values usually decrease compression ratio.
                          * Must be clamped between ZSTD_LDM_MINMATCH_MIN
                          * and ZSTD_LDM_MINMATCH_MAX (default: 64). */
    ZSTD_p_ldmBucketSizeLog,  /* Log size of each bucket in the LDM hash table for collision resolution.
                               * Larger values usually improve collision resolution but may decrease
                               * compression speed.
                               * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */
    ZSTD_p_ldmHashEveryLog,  /* Frequency of inserting/looking up entries in the LDM hash table.
                              * The default is MAX(0, (windowLog - ldmHashLog)) to
                              * optimize hash table usage.
                              * Larger values improve compression speed. Deviating far from the
                              * default value will likely result in a decrease in compression ratio.
                              * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */
 
 } ZSTD_cParameter;
 

size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
 

Set one compression parameter, selected by enum ZSTD_cParameter. Note : when `value` is an enum, cast it to unsigned for proper type checking. @result : 0, or an error code (which can be tested with ZSTD_isError()).


size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
 

Total input data size to be compressed as a single frame.
 This value will be controlled at the end, and will result in an error if not respected.
 @result : 0, or an error code (which can be tested with ZSTD_isError()).
 Note 1 : 0 means zero, empty.
          In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
          Note that ZSTD_CONTENTSIZE_UNKNOWN is default value for new compression jobs.
 Note 2 : If all data is provided and consumed in a single round, this value is overridden by srcSize instead.


size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
 size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
 size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);
 

Create an internal CDict from dict buffer.
 Decompression will have to use same buffer.
 @result : 0, or an error code (which can be tested with ZSTD_isError()).
 Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
           meaning "return to no-dictionary mode".
 Note 1 : `dict` content will be copied internally.
          Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead.
          The dictionary buffer must then outlive its users.
 Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
          For this reason, compression parameters cannot be changed anymore after loading a dictionary.
          It's also a CPU-heavy operation, with non-negligible impact on latency.
 Note 3 : Dictionary will be used for all future compression jobs.
          To return to "no-dictionary" situation, load a NULL dictionary.
 Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() to select how dictionary content will be interpreted.


size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
 

Reference a prepared dictionary, to be used for all next compression jobs.
 Note that compression parameters are enforced from within CDict, and supersede any compression parameter previously set within CCtx.
 The dictionary will remain valid for future compression jobs using same CCtx.
 @result : 0, or an error code (which can be tested with ZSTD_isError()).
 Special : adding a NULL CDict means "return to no-dictionary mode".
 Note 1 : Currently, only one dictionary can be managed.
          Adding a new dictionary effectively "discards" any previous one.
 Note 2 : CDict is just referenced, its lifetime must outlive CCtx.


size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
 size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);
 

Reference a prefix (single-usage dictionary) for next compression job.
 Decompression will need same prefix to properly regenerate data.
 Prefix is **only used once**. Tables are discarded at end of compression job.
 Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
 If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
 @result : 0, or an error code (which can be tested with ZSTD_isError()).
 Special : Adding any prefix (including NULL) invalidates any previous prefix or dictionary.
 Note 1 : Prefix buffer is referenced. It must outlive compression job.
 Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
          It's a CPU-heavy operation, with non-negligible impact on latency.
 Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
          Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode.


typedef enum {
     ZSTD_e_continue=0, /* collect more data, encoder transparently decides when to output result, for optimal conditions */
     ZSTD_e_flush,      /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
    ZSTD_e_end         /* flush any remaining data and close current frame. Any additional data starts a new frame. */
 } ZSTD_EndDirective;
 

size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                               ZSTD_outBuffer* output,
                               ZSTD_inBuffer* input,
                               ZSTD_EndDirective endOp);
 

Behaves about the same as ZSTD_compressStream(). To note :
 - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
 - Compression parameters cannot be changed once compression is started.
 - output->pos must be <= dstCapacity, input->pos must be <= srcSize
 - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
 - @return provides the minimum amount of data still to flush from internal buffers,
   or an error code, which can be tested using ZSTD_isError().
   if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
 - after a ZSTD_e_end directive, if internal buffer is not fully flushed,
   only ZSTD_e_end or ZSTD_e_flush operations are allowed.
   It is necessary to fully flush internal buffers before starting a new compression job,
   or changing compression parameters.
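
 For illustration, a sketch of one-shot compression through this new API.
 compress_oneshot_advanced() is a hypothetical helper; it assumes dstCapacity >= ZSTD_compressBound(srcSize), so the ZSTD_e_end loop is guaranteed to terminate.

   size_t compress_oneshot_advanced(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
   {
       /* push parameters into the CCtx before starting compression */
       size_t err;
       err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
       if (ZSTD_isError(err)) return err;
       err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
       if (ZSTD_isError(err)) return err;

       {   ZSTD_inBuffer  input  = { src, srcSize, 0 };
           ZSTD_outBuffer output = { dst, dstCapacity, 0 };
           size_t remaining;
           /* with ZSTD_e_end, keep calling until @return == 0 (frame fully flushed) */
           do {
               remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
               if (ZSTD_isError(remaining)) return remaining;
           } while (remaining != 0);
           return output.pos;   /* compressed size */
       }
   }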


void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);   /* Not ready yet ! */
 

Return a CCtx to clean state.
 Useful after an error, or to interrupt an ongoing compression job and start a new one.
 Any internal data not yet flushed is cancelled.
 Dictionary (if any) is dropped.
 All parameters are back to default values.
 It's possible to modify compression parameters after a reset.


size_t ZSTD_compress_generic_simpleArgs (
                 ZSTD_CCtx* cctx,
                 void* dst, size_t dstCapacity, size_t* dstPos,
           const void* src, size_t srcSize, size_t* srcPos,
                 ZSTD_EndDirective endOp);
 

Same as ZSTD_compress_generic(), but using only integral types as arguments.
 Argument list is larger than ZSTD_{in,out}Buffer,
 but can be helpful for binders from dynamic languages which have trouble handling structures containing memory pointers.


ZSTD_CCtx_params* ZSTD_createCCtxParams(void);

Quick howto :
 - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
 - ZSTD_CCtxParam_setParameter() : Push parameters one by one into an existing ZSTD_CCtx_params structure.
                                   This is similar to ZSTD_CCtx_setParameter().
 - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to an existing CCtx.
                                              These parameters will be applied to all subsequent compression jobs.
 - ZSTD_compress_generic() : Do compression using the CCtx.
 - ZSTD_freeCCtxParams() : Free the memory.

 This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
 for static allocation for single-threaded compression.
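
 For illustration, a sketch of the howto above as a single helper.
 compress_withCCtxParams() is a hypothetical name; (size_t)-1 is used as a generic failure sentinel (it tests true with ZSTD_isError()).

   size_t compress_withCCtxParams(ZSTD_CCtx* cctx,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize)
   {
       ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
       if (params == NULL) return (size_t)-1;   /* allocation failure */
       ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 5);
       ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
       {   size_t const err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);  /* before any dictionary is loaded */
           ZSTD_freeCCtxParams(params);
           if (ZSTD_isError(err)) return err;
       }
       {   ZSTD_inBuffer  input  = { src, srcSize, 0 };
           ZSTD_outBuffer output = { dst, dstCapacity, 0 };
           size_t const remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
           if (ZSTD_isError(remaining)) return remaining;
           return (remaining == 0) ? output.pos : (size_t)-1;   /* non-zero remaining : dst too small */
       }
   }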


size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params);


Reset params to default, with the default compression level.


size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel);


Initializes the compression parameters of cctxParams according to compression level.
All other parameters are reset to their default values.


size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);


Initializes the compression and frame parameters of cctxParams according to params.
All other parameters are reset to their default values.


size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);


Similar to ZSTD_CCtx_setParameter().
Set one compression parameter, selected by enum ZSTD_cParameter.
Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
Note : when `value` is an enum, cast it to unsigned for proper type checking.
@result : 0, or an error code (which can be tested with ZSTD_isError()).


size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);


Apply a set of ZSTD_CCtx_params to the compression context.
This must be done before the dictionary is loaded.
The pledgedSrcSize is treated as unknown.
Multithreading parameters are applied only if nbThreads > 1.



Advanced parameters for decompression API


size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);   /* not implemented */
size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);   /* not implemented */
size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode);   /* not implemented */

Create an internal DDict from dict buffer, to be used to decompress next frames.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, meaning "return to no-dictionary mode".
Note 1 : `dict` content will be copied internally. Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead; in which case, the dictionary buffer must outlive its users.
Note 2 : Loading a dictionary involves building tables, which has a non-negligible impact on CPU usage and latency.
Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select how dictionary content will be interpreted and loaded.


size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);   /* not implemented */

Reference a prepared dictionary, to be used to decompress next frames.
The dictionary remains active for decompression of future frames using the same DCtx.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Note 1 : Currently, only one dictionary can be managed. Referencing a new dictionary effectively "discards" any previous one.
Special : adding a NULL DDict means "return to no-dictionary mode".
Note 2 : DDict is just referenced, so its lifetime must outlive its usage from the DCtx.


size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);   /* not implemented */
size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode);   /* not implemented */

Reference a prefix (single-usage dictionary) for the next decompression job.
The prefix is **only used once**. It must be explicitly referenced before each frame.
If the same prefix needs to be used multiple times, consider embedding it into a ZSTD_DDict instead.
@result : 0, or an error code (which can be tested with ZSTD_isError()).
Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary.
Note 2 : The prefix buffer is referenced. It must outlive the decompression job.
Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent). Use ZSTD_DCtx_refPrefix_advanced() to alter dictMode.
Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.


size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);

Refuses allocating internal buffers for frames requiring a window size larger than the provided limit.
This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX).
@return : 0, or an error code (which can be tested using ZSTD_isError()).
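For instance, a decoder facing untrusted input might cap the window before streaming begins (a sketch; the 8 MB limit is an arbitrary illustration) :

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    /* frames declaring a window larger than 8 MB will be rejected */
    size_t const rc = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 23);
    if (ZSTD_isError(rc)) { /* handle error */ }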


size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);

Instruct the decoder context about what kind of data to decode next.
This instruction is mandatory to decode data without a fully-formed header, such as ZSTD_f_zstd1_magicless for example.
@return : 0, or an error code (which can be tested using ZSTD_isError()).
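For example, to accept frames produced without the standard 4-byte magic number (a sketch; the call must precede any input for such a frame) :

    size_t const rc = ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);
    if (ZSTD_isError(rc)) { /* handle error */ }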


size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
                               ZSTD_outBuffer* output,
                               ZSTD_inBuffer* input);

Behave the same as ZSTD_decompressStream.
Decompression parameters cannot be changed once decompression is started.
@return : an error code, which can be tested using ZSTD_isError().
If > 0, it is a hint : the number of expected input bytes for the next invocation.
`0` means : a frame has just been fully decoded and flushed.
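A matching decompression loop might look like this (sketch only; same hypothetical I/O helpers as in the compression example above) :

    static void decompressStream(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        char inBuf[1 << 16];
        char outBuf[1 << 16];
        size_t readSize;
        while ((readSize = readFromSource(inBuf, sizeof(inBuf))) != 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
                size_t const hint = ZSTD_decompress_generic(dctx, &output, &input);
                if (ZSTD_isError(hint)) { /* handle error */ return; }
                writeToDestination(outBuf, output.pos);
                /* hint == 0 : a frame has just been fully decoded and flushed */
            }
        }
        ZSTD_freeDCtx(dctx);
    }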


size_t ZSTD_decompress_generic_simpleArgs (
                ZSTD_DCtx* dctx,
                void* dst, size_t dstCapacity, size_t* dstPos,
          const void* src, size_t srcSize, size_t* srcPos);

Same as ZSTD_decompress_generic(), but using only integral types as arguments.
The argument list is larger than ZSTD_{in,out}Buffer, but can be helpful for binders from dynamic languages which have trouble handling structures containing memory pointers.


void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);

Return a DCtx to clean state.
If a decompression was ongoing, any internal data not yet flushed is cancelled.
All parameters are back to default values, including sticky ones.
Dictionary (if any) is dropped.
Parameters can be modified again after a reset.



Block level API



Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
The user will have to take charge of the information required to regenerate data, such as compressed and content sizes.

A few rules to respect :
- Compressing and decompressing require a context structure
  + Use ZSTD_createCCtx() and ZSTD_createDCtx()
- It is necessary to init context before starting
  + compression : any ZSTD_compressBegin*() variant, including with dictionary
  + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
  + copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
  + If input is larger than a block size, it's necessary to split input data into multiple blocks
  + For inputs larger than a single block, consider using the regular ZSTD_compress() instead : frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero : nothing is produced into `dst`.
  + The user must test for such an outcome and deal directly with the uncompressed data
  + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
  + When multiple successive blocks are decompressed and some of them are uncompressed, the decoder must be informed of their existence in order to follow proper history : use ZSTD_insertBlock() for such a case.

A sketch tying these rules together follows the function list below.



Raw zstd block functions

size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
 size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert an uncompressed block into `dctx` history. Useful for multi-block decompression */
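The following sketch ties these rules together for a single block (illustrative only; buffers are assumed provided by the caller, and srcSize is assumed <= ZSTD_getBlockSize(cctx)) :

    #define ZSTD_STATIC_LINKING_ONLY   /* block functions are in the experimental section */
    #include <zstd.h>

    static int roundTripBlock(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                              const void* src, size_t srcSize,
                              void* cDst, size_t cDstCapacity,
                              void* dDst, size_t dDstCapacity)
    {
        size_t cSize;
        ZSTD_compressBegin(cctx, 3);    /* contexts must be initialized first */
        ZSTD_decompressBegin(dctx);
        cSize = ZSTD_compressBlock(cctx, cDst, cDstCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return -1;
        if (cSize == 0) {
            /* block judged not compressible : nothing was written into cDst.
             * Store src uncompressed, and keep decoder history consistent. */
            ZSTD_insertBlock(dctx, src, srcSize);
        } else {
            size_t const dSize = ZSTD_decompressBlock(dctx, dDst, dDstCapacity, cDst, cSize);
            if (ZSTD_isError(dSize)) return -1;
        }
        return 0;
    }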
 

Index: vendor/zstd/dist/lib/.gitignore =================================================================== --- vendor/zstd/dist/lib/.gitignore (revision 325596) +++ vendor/zstd/dist/lib/.gitignore (revision 325597) @@ -1,2 +1,3 @@ # make install artefact libzstd.pc +libzstd-nomt Index: vendor/zstd/dist/lib/Makefile =================================================================== --- vendor/zstd/dist/lib/Makefile (revision 325596) +++ vendor/zstd/dist/lib/Makefile (revision 325597) @@ -1,175 +1,189 @@ -# ########################################################################## -# Copyright (c) 2016-present, Yann Collet, Facebook, Inc. +# ################################################################ +# Copyright (c) 2015-present, Yann Collet, Facebook, Inc. # All rights reserved. # -# This Makefile is validated for Linux, macOS, *BSD, Hurd, Solaris, MSYS2 targets -# -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. -# ########################################################################## +# This source code is licensed under both the BSD-style license (found in the +# LICENSE file in the root directory of this source tree) and the GPLv2 (found +# in the COPYING file in the root directory of this source tree). +# ################################################################ # Version numbers LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < ./zstd.h` LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT) LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT)) LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT)) LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT)) LIBVER := $(shell echo $(LIBVER_SCRIPT)) VERSION?= $(LIBVER) CPPFLAGS+= -I. 
-I./common -DXXH_NAMESPACE=ZSTD_ CFLAGS ?= -O3 DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \ -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \ -Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security \ -Wvla -Wformat=2 -Winit-self -Wfloat-equal -Wwrite-strings \ -Wredundant-decls CFLAGS += $(DEBUGFLAGS) $(MOREFLAGS) FLAGS = $(CPPFLAGS) $(CFLAGS) -ZSTD_FILES := $(wildcard common/*.c compress/*.c decompress/*.c dictBuilder/*.c deprecated/*.c) +ZSTD_FILES := $(sort $(wildcard common/*.c compress/*.c decompress/*.c dictBuilder/*.c deprecated/*.c)) ZSTD_LEGACY_SUPPORT ?= 4 ifneq ($(ZSTD_LEGACY_SUPPORT), 0) ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0) ZSTD_FILES += $(shell ls legacy/*.c | grep 'v0[$(ZSTD_LEGACY_SUPPORT)-7]') endif CPPFLAGS += -I./legacy endif CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT) ZSTD_OBJ := $(patsubst %.c,%.o,$(ZSTD_FILES)) # OS X linker doesn't support -soname, and use different extension # see : https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryDesignGuidelines.html ifeq ($(shell uname), Darwin) SHARED_EXT = dylib SHARED_EXT_MAJOR = $(LIBVER_MAJOR).$(SHARED_EXT) SHARED_EXT_VER = $(LIBVER).$(SHARED_EXT) SONAME_FLAGS = -install_name $(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) -compatibility_version $(LIBVER_MAJOR) -current_version $(LIBVER) else SONAME_FLAGS = -Wl,-soname=libzstd.$(SHARED_EXT).$(LIBVER_MAJOR) SHARED_EXT = so SHARED_EXT_MAJOR = $(SHARED_EXT).$(LIBVER_MAJOR) SHARED_EXT_VER = $(SHARED_EXT).$(LIBVER) endif LIBZSTD = libzstd.$(SHARED_EXT_VER) .PHONY: default all clean install uninstall default: lib-release all: lib libzstd.a: ARFLAGS = rcs libzstd.a: $(ZSTD_OBJ) @echo compiling static library @$(AR) $(ARFLAGS) $@ $^ libzstd.a-mt: CPPFLAGS += -DZSTD_MULTITHREAD libzstd.a-mt: libzstd.a $(LIBZSTD): LDFLAGS += -shared -fPIC -fvisibility=hidden $(LIBZSTD): $(ZSTD_FILES) @echo compiling dynamic library $(LIBVER) ifneq (,$(filter Windows%,$(OS))) @$(CC) $(FLAGS) -DZSTD_DLL_EXPORT=1 -shared $^ -o dll\libzstd.dll dlltool -D dll\libzstd.dll -d dll\libzstd.def -l dll\libzstd.lib else @$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ @echo creating versioned links @ln -sf $@ libzstd.$(SHARED_EXT_MAJOR) @ln -sf $@ libzstd.$(SHARED_EXT) endif libzstd : $(LIBZSTD) libzstd-mt : CPPFLAGS += -DZSTD_MULTITHREAD libzstd-mt : libzstd lib: libzstd.a libzstd lib-mt: CPPFLAGS += -DZSTD_MULTITHREAD lib-mt: lib lib-release lib-release-mt: DEBUGFLAGS := lib-release: lib lib-release-mt: lib-mt +# Special case : building library in single-thread mode _and_ without zstdmt_compress.c +ZSTDMT_FILES = compress/zstdmt_compress.c +ZSTD_NOMT_FILES = $(filter-out $(ZSTDMT_FILES),$(ZSTD_FILES)) +libzstd-nomt: LDFLAGS += -shared -fPIC -fvisibility=hidden +libzstd-nomt: $(ZSTD_NOMT_FILES) + @echo compiling single-thread dynamic library $(LIBVER) + @echo files : $(ZSTD_NOMT_FILES) + @$(CC) $(FLAGS) $^ $(LDFLAGS) $(SONAME_FLAGS) -o $@ + clean: @$(RM) -r *.dSYM # Mac OS-X specific @$(RM) core *.o *.a *.gcda *.$(SHARED_EXT) *.$(SHARED_EXT).* libzstd.pc - @$(RM) dll/libzstd.dll dll/libzstd.lib + @$(RM) dll/libzstd.dll dll/libzstd.lib libzstd-nomt* @$(RM) common/*.o compress/*.o decompress/*.o dictBuilder/*.o legacy/*.o deprecated/*.o @echo Cleaning library completed #----------------------------------------------------------------------------- # make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets 
#----------------------------------------------------------------------------- ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU OpenBSD FreeBSD NetBSD DragonFly SunOS)) -ifneq (,$(filter $(shell uname),SunOS)) -INSTALL ?= ginstall -else -INSTALL ?= install -endif +DESTDIR ?= +# directory variables : GNU conventions prefer lowercase +# see https://www.gnu.org/prep/standards/html_node/Makefile-Conventions.html +# support both lower and uppercase (BSD), use uppercase in script +prefix ?= /usr/local +PREFIX ?= $(prefix) +exec_prefix ?= $(PREFIX) +libdir ?= $(exec_prefix)/lib +LIBDIR ?= $(libdir) +includedir ?= $(PREFIX)/include +INCLUDEDIR ?= $(includedir) -PREFIX ?= /usr/local -DESTDIR ?= -LIBDIR ?= $(PREFIX)/lib -INCLUDEDIR ?= $(PREFIX)/include - ifneq (,$(filter $(shell uname),OpenBSD FreeBSD NetBSD DragonFly)) PKGCONFIGDIR ?= $(PREFIX)/libdata/pkgconfig else PKGCONFIGDIR ?= $(LIBDIR)/pkgconfig endif -INSTALL_LIB ?= $(INSTALL) -m 755 -INSTALL_DATA ?= $(INSTALL) -m 644 +ifneq (,$(filter $(shell uname),SunOS)) +INSTALL ?= ginstall +else +INSTALL ?= install +endif +INSTALL_PROGRAM ?= $(INSTALL) +INSTALL_DATA ?= $(INSTALL) -m 644 + libzstd.pc: libzstd.pc: libzstd.pc.in @echo creating pkgconfig @sed -e 's|@PREFIX@|$(PREFIX)|' \ -e 's|@LIBDIR@|$(LIBDIR)|' \ -e 's|@INCLUDEDIR@|$(INCLUDEDIR)|' \ -e 's|@VERSION@|$(VERSION)|' \ $< >$@ install: libzstd.a libzstd libzstd.pc @$(INSTALL) -d -m 755 $(DESTDIR)$(PKGCONFIGDIR)/ $(DESTDIR)$(INCLUDEDIR)/ @$(INSTALL_DATA) libzstd.pc $(DESTDIR)$(PKGCONFIGDIR)/ @echo Installing libraries @$(INSTALL_DATA) libzstd.a $(DESTDIR)$(LIBDIR) - @$(INSTALL_LIB) libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR) - @ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - @ln -sf libzstd.$(SHARED_EXT_VER) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) + @$(INSTALL_PROGRAM) $(LIBZSTD) $(DESTDIR)$(LIBDIR) + @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) + @ln -sf $(LIBZSTD) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) @echo Installing includes @$(INSTALL_DATA) zstd.h $(DESTDIR)$(INCLUDEDIR) @$(INSTALL_DATA) common/zstd_errors.h $(DESTDIR)$(INCLUDEDIR) @$(INSTALL_DATA) deprecated/zbuff.h $(DESTDIR)$(INCLUDEDIR) # prototypes generate deprecation warnings @$(INSTALL_DATA) dictBuilder/zdict.h $(DESTDIR)$(INCLUDEDIR) @echo zstd static and shared library installed uninstall: @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.a @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT) @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_MAJOR) - @$(RM) $(DESTDIR)$(LIBDIR)/libzstd.$(SHARED_EXT_VER) + @$(RM) $(DESTDIR)$(LIBDIR)/$(LIBZSTD) @$(RM) $(DESTDIR)$(PKGCONFIGDIR)/libzstd.pc @$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd.h @$(RM) $(DESTDIR)$(INCLUDEDIR)/zstd_errors.h @$(RM) $(DESTDIR)$(INCLUDEDIR)/zbuff.h # Deprecated streaming functions @$(RM) $(DESTDIR)$(INCLUDEDIR)/zdict.h @echo zstd libraries successfully uninstalled endif Index: vendor/zstd/dist/lib/README.md =================================================================== --- vendor/zstd/dist/lib/README.md (revision 325596) +++ vendor/zstd/dist/lib/README.md (revision 325597) @@ -1,85 +1,108 @@ Zstandard library files ================================ -The __lib__ directory contains several directories. -Depending on target use case, it's enough to include only files from relevant directories. +The __lib__ directory is split into several sub-directories, +in order to make it easier to select or exclude specific features. 
+#### Building + +`Makefile` script is provided, supporting the standard set of commands, +directories, and variables (see https://www.gnu.org/prep/standards/html_node/Command-Variables.html). +- `make` : generates both static and dynamic libraries +- `make install` : install libraries in default system directories + + #### API -Zstandard's stable API is exposed within [zstd.h](zstd.h), -at the root of `lib` directory. +Zstandard's stable API is exposed within [lib/zstd.h](zstd.h). #### Advanced API -Some additional API may be useful if you're looking into advanced features : -- common/error_public.h : transforms `size_t` function results into an `enum`, - for precise error handling. -- ZSTD_STATIC_LINKING_ONLY : if you define this macro _before_ including `zstd.h`, - it will give access to advanced and experimental API. +Optional advanced features are exposed via : + +- `lib/common/zstd_errors.h` : translates `size_t` function results + into an `ZSTD_ErrorCode`, for accurate error handling. +- `ZSTD_STATIC_LINKING_ONLY` : if this macro is defined _before_ including `zstd.h`, + it unlocks access to advanced experimental API, + exposed in second part of `zstd.h`. These APIs shall ___never be used with dynamic library___ ! They are not "stable", their definition may change in the future. Only static linking is allowed. -#### ZSTDMT API -To enable multithreaded compression within the library, invoke `make lib-mt` target. -Prototypes are defined in header file `compress/zstdmt_compress.h`. -When linking a program that uses ZSTDMT API against libzstd.a on a POSIX system, -`-pthread` flag must be provided to the compiler and linker. -Note : ZSTDMT prototypes can still be used with a library built without multithread support, -but in this case, they will be single threaded only. - #### Modular build -Directory `common/` is required in all circumstances. -You can select to support compression only, by just adding files from the `compress/` directory, -In a similar way, you can build a decompressor-only library with the `decompress/` directory. +- Directory `lib/common` is always required, for all variants. +- Compression source code lies in `lib/compress` +- Decompression source code lies in `lib/decompress` +- It's possible to include only `compress` or only `decompress`, they don't depend on each other. +- `lib/dictBuilder` : makes it possible to generate dictionaries from a set of samples. + The API is exposed in `lib/dictBuilder/zdict.h`. + This module depends on both `lib/common` and `lib/compress` . +- `lib/legacy` : source code to decompress older zstd formats, starting from `v0.1`. + This module depends on `lib/common` and `lib/decompress`. + To enable this feature, it's necessary to define `ZSTD_LEGACY_SUPPORT = 1` during compilation. + Typically, with `gcc`, add argument `-DZSTD_LEGACY_SUPPORT=1`. + Using higher number limits the number of version supported. + For example, `ZSTD_LEGACY_SUPPORT=2` means : "support legacy formats starting from v0.2+". + The API is exposed in `lib/legacy/zstd_legacy.h`. + Each version also provides a (dedicated) set of advanced API. + For example, advanced API for version `v0.4` is exposed in `lib/legacy/zstd_v04.h` . -Other optional functionalities provided are : -- `dictBuilder/` : source files to create dictionaries. - The API can be consulted in `dictBuilder/zdict.h`. - This module also depends on `common/` and `compress/` . +#### Multithreading support -- `legacy/` : source code to decompress previous versions of zstd, starting from `v0.1`. 
- This module also depends on `common/` and `decompress/` . - Library compilation must include directive `ZSTD_LEGACY_SUPPORT = 1` . - The main API can be consulted in `legacy/zstd_legacy.h`. - Advanced API from each version can be found in their relevant header file. - For example, advanced API for version `v0.4` is in `legacy/zstd_v04.h` . +Multithreading is disabled by default when building with `make`. +Enabling multithreading requires 2 conditions : +- set macro `ZSTD_MULTITHREAD` +- on POSIX systems : compile with pthread (`-pthread` compilation flag for `gcc` for example) +Both conditions are automatically triggered by invoking `make lib-mt` target. +Note that, when linking a POSIX program with a multithreaded version of `libzstd`, +it's necessary to trigger `-pthread` flag during link stage. -#### Using MinGW+MSYS to create DLL +Multithreading capabilities are exposed via : +- private API `lib/compress/zstdmt_compress.h`. + Symbols defined in this header are currently exposed in `libzstd`, hence usable. + Note however that this API is planned to be locked and remain strictly internal in the future. +- advanced API `ZSTD_compress_generic()`, defined in `lib/zstd.h`, experimental section. + This API is still considered experimental, but is designed to be labelled "stable" at some point in the future. + It's the recommended entry point for multi-threading operations. + +#### Windows : using MinGW+MSYS to create DLL + DLL can be created using MinGW+MSYS with the `make libzstd` command. This command creates `dll\libzstd.dll` and the import library `dll\libzstd.lib`. The import library is only required with Visual C++. The header file `zstd.h` and the dynamic library `dll\libzstd.dll` are required to compile a project using gcc/MinGW. The dynamic library has to be added to linking options. It means that if a project that uses ZSTD consists of a single `test-dll.c` file it should be linked with `dll\libzstd.dll`. For example: ``` gcc $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\libzstd.dll ``` The compiled executable will require ZSTD DLL which is available at `dll\libzstd.dll`. -#### Obsolete streaming API +#### Deprecated API -Streaming is now provided within `zstd.h`. -Older streaming API is still available within `deprecated/zbuff.h`. -It will be removed in a future version. -Consider migrating code towards newer streaming API in `zstd.h`. +Obsolete API on their way out are stored in directory `lib/deprecated`. +At this stage, it contains older streaming prototypes, in `lib/deprecated/zbuff.h`. +Presence in this directory is temporary. +These prototypes will be removed in some future version. +Consider migrating code towards supported streaming API exposed in `zstd.h`. #### Miscellaneous The other files are not source code. 
There are : - - LICENSE : contains the BSD license text - - Makefile : script to compile or install zstd library (static and dynamic) - - libzstd.pc.in : for pkg-config (`make install`) - - README.md : this file + - `LICENSE` : contains the BSD license text + - `Makefile` : `make` script to build and install zstd library (static and dynamic) + - `BUCK` : support for `buck` build system (https://buckbuild.com/) + - `libzstd.pc.in` : for `pkg-config` (used in `make install`) + - `README.md` : this file Index: vendor/zstd/dist/lib/common/bitstream.h =================================================================== --- vendor/zstd/dist/lib/common/bitstream.h (revision 325596) +++ vendor/zstd/dist/lib/common/bitstream.h (revision 325597) @@ -1,459 +1,471 @@ /* ****************************************************************** bitstream Part of FSE library header file (to include) Copyright (C) 2013-2017, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which must be inlined for best performance. * Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /*-**************************************** * Dependencies ******************************************/ #include "mem.h" /* unaligned access routines */ #include "error_private.h" /* error codes and messages */ /*-************************************* * Debug ***************************************/ #if defined(BIT_DEBUG) && (BIT_DEBUG>=1) # include #else # ifndef assert # define assert(condition) ((void)0) # endif #endif /*========================================= * Target specific =========================================*/ #if defined(__BMI__) && defined(__GNUC__) # include /* support for bextr (experimental) */ #endif #define STREAM_ACCUMULATOR_MIN_32 25 #define STREAM_ACCUMULATOR_MIN_64 57 #define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? 
STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) /*-****************************************** * bitStream encoding API (write forward) ********************************************/ /* bitStream can mix input from multiple sources. * A critical property of these streams is that they encode and decode in **reverse** direction. * So the first bit sequence you add will be the last to be read, like a LIFO stack. */ typedef struct { size_t bitContainer; unsigned bitPos; char* startPtr; char* ptr; char* endPtr; } BIT_CStream_t; MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); /* Start with initCStream, providing the size of buffer to write into. * bitStream will never write outside of this buffer. * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. * * bits are first added to a local register. * Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. * Writing data into memory is an explicit operation, performed by the flushBits function. * Hence keep track how many bits are potentially stored into local register to avoid register overflow. * After a flushBits, a maximum of 7 bits might still be stored into local register. * * Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. * * Last operation is to close the bitStream. * The function returns the final size of CStream in bytes. * If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) */ /*-******************************************** * bitStream decoding API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; const char* limitPtr; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /* Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. * Otherwise, it can be less than that, so proceed accordingly. * Checking if DStream has reached its end can be performed with BIT_endOfDStream(). 
*/ /*-**************************************** * unsafe API ******************************************/ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); /* unsafe version; does not check buffer overflow */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /*-************************************************************** * Internal functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (register U32 val) { + assert(val != 0); + { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - _BitScanReverse ( &r, val ); - return (unsigned) r; + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ - return 31 - __builtin_clz (val); + return 31 - __builtin_clz (val); # else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; # endif + } } /*===== Local Constants =====*/ -static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, - 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, - 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, - 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */ +static const unsigned BIT_mask[] = { + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, + 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, + 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, + 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) - /*-************************************************************** * bitStream encoding ****************************************************************/ /*! BIT_initCStream() : * `dstCapacity` must be > sizeof(size_t) * @return : 0 if success, * otherwise an error code (can be tested using ERR_isError()) */ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity) { bitC->bitContainer = 0; bitC->bitPos = 0; bitC->startPtr = (char*)startPtr; bitC->ptr = bitC->startPtr; bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); return 0; } /*! BIT_addBits() : - * can add up to 26 bits into `bitC`. + * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! 
*/ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { + MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32); + assert(nbBits < BIT_MASK_SIZE); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_addBitsFast() : * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { assert((value>>nbBits) == 0); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= value << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_flushBitsFast() : * assumption : bitContainer has not overflowed * unsafe version; does not check buffer overflow */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; - assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) ); + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; assert(bitC->ptr <= bitC->endPtr); bitC->bitPos &= 7; bitC->bitContainer >>= nbBytes*8; } /*! BIT_flushBits() : * assumption : bitContainer has not overflowed * safe version; check for buffer overflow, and prevents it. * note : does not signal buffer overflow. * overflow will be revealed later on using BIT_closeCStream() */ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; - assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) ); + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; bitC->bitPos &= 7; bitC->bitContainer >>= nbBytes*8; } /*! BIT_closeCStream() : * @return : size of CStream, in bytes, * or 0 if it could not fit into dstBuffer */ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) { BIT_addBitsFast(bitC, 1, 1); /* endMark */ BIT_flushBits(bitC); if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); } /*-******************************************************** * bitStream decoding **********************************************************/ /*! BIT_initDStream() : * Initialize a BIT_DStream_t. * `bitD` : a pointer to an already allocated BIT_DStream_t structure. * `srcSize` must be the *exact* size of the bitStream, in bytes. * @return : size of stream (== srcSize), or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } bitD->start = (const char*)srcBuffer; bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } } else { bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); /* fall-through */ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); /* fall-through */ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); /* fall-through */ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */ default: break; } { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; } return srcSize; } MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; } MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { #if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */ # if defined(__x86_64__) if (sizeof(bitContainer)==8) return _bextr_u64(bitContainer, start, nbBits); else # endif return _bextr_u32(bitContainer, start, nbBits); #else + assert(nbBits < BIT_MASK_SIZE); return (bitContainer >> start) & BIT_mask[nbBits]; #endif } MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { + assert(nbBits < BIT_MASK_SIZE); return bitContainer & BIT_mask[nbBits]; } /*! BIT_lookBits() : * Provides next n bits from local register. * local register is not modified. * On 32-bits, maxNbBits==24. * On 64-bits, maxNbBits==56. * @return : value extracted */ MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { #if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); #else U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); #endif } /*! BIT_lookBitsFast() : * unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) { U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; assert(nbBits >= 1); return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } /*! BIT_readBits() : * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*! 
BIT_readBitsFast() : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); assert(nbBits >= 1); BIT_skipBits(bitD, nbBits); return value; } /*! BIT_reloadDStream() : * Refill `bitD` from buffer previously set in BIT_initDStream() . * This function is safe, it guarantees it will not read beyond src buffer. * @return : status of `BIT_DStream_t` internal register. * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->limitPtr) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } /* start < ptr < limitPtr */ { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ return result; } } /*! BIT_endOfDStream() : * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ Index: vendor/zstd/dist/lib/common/compiler.h =================================================================== --- vendor/zstd/dist/lib/common/compiler.h (revision 325596) +++ vendor/zstd/dist/lib/common/compiler.h (revision 325597) @@ -1,85 +1,86 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_COMPILER_H #define ZSTD_COMPILER_H /*-******************************************************* * Compiler specifics *********************************************************/ /* force inlining */ #if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else # define INLINE_KEYWORD #endif #if defined(__GNUC__) # define FORCE_INLINE_ATTR __attribute__((always_inline)) #elif defined(_MSC_VER) # define FORCE_INLINE_ATTR __forceinline #else # define FORCE_INLINE_ATTR #endif /** * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to elimininate the constant * branches. */ #define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR /** * HINT_INLINE is used to help the compiler generate better code. It is *not* * used for "templates", so it can be tweaked based on the compilers * performance. 
* * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the * always_inline attribute. * * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline * attribute. */ #if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 # define HINT_INLINE static INLINE_KEYWORD #else # define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR #endif /* force no inlining */ #ifdef _MSC_VER # define FORCE_NOINLINE static __declspec(noinline) #else # ifdef __GNUC__ # define FORCE_NOINLINE static __attribute__((__noinline__)) # else # define FORCE_NOINLINE static # endif #endif /* prefetch */ #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ # include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0) #elif defined(__GNUC__) # define PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) #else # define PREFETCH(ptr) /* disabled */ #endif /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ # include /* For Visual 2005 */ # pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif #endif /* ZSTD_COMPILER_H */ Index: vendor/zstd/dist/lib/common/error_private.c =================================================================== --- vendor/zstd/dist/lib/common/error_private.c (revision 325596) +++ vendor/zstd/dist/lib/common/error_private.c (revision 325597) @@ -1,45 +1,47 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ /* The purpose of this file is to have a single list of error strings embedded in binary */ #include "error_private.h" const char* ERR_getErrorString(ERR_enum code) { static const char* const notErrorCode = "Unspecified error code"; switch( code ) { case PREFIX(no_error): return "No error detected"; case PREFIX(GENERIC): return "Error (generic)"; case PREFIX(prefix_unknown): return "Unknown frame descriptor"; case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; case PREFIX(corruption_detected): return "Corrupted block detected"; case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; case PREFIX(parameter_unsupported): return "Unsupported parameter"; case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; - case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; - case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; + case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; + case PREFIX(srcSize_wrong): return "Src size is incorrect"; + /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(maxCode): default: return notErrorCode; } } Index: vendor/zstd/dist/lib/common/error_private.h =================================================================== --- vendor/zstd/dist/lib/common/error_private.h (revision 325596) +++ vendor/zstd/dist/lib/common/error_private.h (revision 325597) @@ -1,76 +1,76 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ /* Note : this module is expected to remain private, do not expose it */ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* **************************************** * Dependencies ******************************************/ #include /* size_t */ #include "zstd_errors.h" /* enum list */ /* **************************************** * Compiler-specific ******************************************/ #if defined(__GNUC__) # define ERR_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define ERR_STATIC static inline #elif defined(_MSC_VER) # define ERR_STATIC static __inline #else # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /*-**************************************** * Customization (error_public.h) ******************************************/ typedef ZSTD_ErrorCode ERR_enum; #define PREFIX(name) ZSTD_error_##name /*-**************************************** * Error codes handling ******************************************/ -#ifdef ERROR -# undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ -#endif -#define ERROR(name) ((size_t)-PREFIX(name)) +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#define ERROR(name) ZSTD_ERROR(name) +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } /*-**************************************** * Error Strings ******************************************/ const char* ERR_getErrorString(ERR_enum code); /* error_private.c */ ERR_STATIC const char* ERR_getErrorName(size_t code) { return ERR_getErrorString(ERR_getErrorCode(code)); } #if defined (__cplusplus) } #endif #endif /* ERROR_H_MODULE */ Index: vendor/zstd/dist/lib/common/fse.h =================================================================== --- vendor/zstd/dist/lib/common/fse.h (revision 325596) +++ vendor/zstd/dist/lib/common/fse.h (revision 325597) @@ -1,704 +1,704 @@ /* ****************************************************************** FSE : Finite State Entropy codec Public Prototypes declaration Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif #ifndef FSE_H #define FSE_H /*-***************************************** * Dependencies ******************************************/ #include /* size_t, ptrdiff_t */ /*-***************************************** * FSE_PUBLIC_API : control library symbols visibility ******************************************/ #if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) # define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) #elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ # define FSE_PUBLIC_API __declspec(dllexport) #elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) # define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define FSE_PUBLIC_API #endif /*------ Version ------*/ #define FSE_VERSION_MAJOR 0 #define FSE_VERSION_MINOR 9 #define FSE_VERSION_RELEASE 0 #define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE #define FSE_QUOTE(str) #str #define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) #define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) #define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ /*-**************************************** * FSE simple functions ******************************************/ /*! FSE_compress() : Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). @return : size of compressed data (<= dstCapacity). Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. if FSE_isError(return), compression failed (more details using FSE_getErrorName()) */ FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize); /*! FSE_decompress(): Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', into already allocated destination buffer 'dst', of size 'dstCapacity'. @return : size of regenerated data (<= maxDstSize), or an error code, which can be tested using FSE_isError() . ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. 
*/ FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize); /*-***************************************** * Tool functions ******************************************/ FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ /* Error Management */ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ /*-***************************************** * FSE advanced functions ******************************************/ /*! FSE_compress2() : Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' Both parameters can be defined as '0' to mean : use default value @return : size of compressed data Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. if FSE_isError(return), it's an error code. */ FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); /*-***************************************** * FSE detailed API ******************************************/ /*! FSE_compress() does the following: 1. count symbol occurrence from source[] into table count[] 2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) 3. save normalized counters to memory buffer using writeNCount() 4. build encoding table 'CTable' from normalized counters 5. encode the data stream using encoding table 'CTable' FSE_decompress() does the following: 1. read normalized counters with readNCount() 2. build decoding table 'DTable' from normalized counters 3. decode the data stream using decoding table 'DTable' The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and provide normalized distribution using external method. */ /* *** COMPRESSION *** */ /*! FSE_count(): Provides the precise count of each byte within a table 'count'. 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). *maxSymbolValuePtr will be updated if detected smaller than initial value. @return : the count of the most frequent symbol (which is not identified). if return == srcSize, there is only one symbol. Can also return an error code, which can be tested with FSE_isError(). */ FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); /*! FSE_optimalTableLog(): dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. @return : recommended tableLog (necessarily <= 'maxTableLog') */ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); /*! FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). @return : tableLog, or an errorCode, which can be tested using FSE_isError() */ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); /*! 
FSE_NCountWriteBound():
    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
    Typically useful for allocation purposes. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_writeNCount():
    Compactly save 'normalizedCounter' into 'buffer'.
    @return : size of the compressed table,
              or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! Constructor and Destructor of FSE_CTable.
    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue);
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);

/*! FSE_buildCTable():
    Builds `ct`, which must be already allocated, using FSE_createCTable().
    @return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_compress_usingCTable():
    Compress `src` using `ct` into `dst` which must be already allocated.
    @return : size of compressed data (<= `dstCapacity`),
              or 0 if compressed data could not fit into `dst`,
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);

/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).

The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").

The result of FSE_normalizeCount() will be saved into a table, called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).

'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
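A short sketch of that save step, sizing the buffer with FSE_NCountWriteBound() as declared above; `norm`, `maxSymbolValue` and `tableLog` carry over from the previous sketch, and the heap allocation is only one option:

    size_t const hBound = FSE_NCountWriteBound(maxSymbolValue, tableLog);
    void*  const header = malloc(hBound);   /* a stack buffer works too */
    if (header != NULL) {
        size_t const hSize = FSE_writeNCount(header, hBound, norm, maxSymbolValue, tableLog);
        if (FSE_isError(hSize)) { /* ex : buffer size too small */ }
    }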
For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).

'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).

'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/

/* *** DECOMPRESSION *** */

/*! FSE_readNCount():
    Read compactly saved 'normalizedCounter' from 'rBuffer'.
    @return : size read from 'rBuffer',
              or an errorCode, which can be tested using FSE_isError().
              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);

/*! Constructor and Destructor of FSE_DTable.
    Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);

/*! FSE_buildDTable():
    Builds 'dt', which must be already allocated, using FSE_createDTable().
    return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);

/*! FSE_decompress_usingDTable():
    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
    into `dst` which must be already allocated.
    @return : size of regenerated data (necessarily <= `dstCapacity`),
              or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);

/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
 If block is uncompressed, use memcpy() instead
 If block is a single repeated byte, use memset() instead )

The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().
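Tying the decoding side together — a minimal sketch that also uses the FSE_buildDTable() and FSE_decompress_usingDTable() steps described just below; `cSrc`/`cSrcSize` and `dst`/`dstCapacity` are assumed inputs, with the NCount header at the front of `cSrc`:

    short    norm[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog = 0;
    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
    if (!FSE_isError(hSize)) {
        FSE_DTable* const dt = FSE_createDTable(tableLog);
        if (dt != NULL) {
            size_t const built = FSE_buildDTable(dt, norm, maxSymbolValue, tableLog);
            if (!FSE_isError(built)) {
                /* cSrcSize - hSize must be exactly the compressed payload size */
                size_t const dSize = FSE_decompress_usingDTable(dst, dstCapacity,
                                        (const char*)cSrc + hSize, cSrcSize - hSize, dt);
                if (FSE_isError(dSize)) { /* ex : dst buffer too small */ }
            }
            FSE_freeDTable(dt);
        }
    }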
The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().

`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/

#endif  /* FSE_H */

#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
#define FSE_H_FSE_STATIC_LINKING_ONLY

/* *** Dependency *** */
#include "bitstream.h"

/* *****************************************
*  Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))

/* *****************************************
*  FSE advanced API
*******************************************/
/* FSE_count_wksp() :
 * Same as FSE_count(), but using an externally provided scratch buffer.
 * `workSpace` size must be table of >= `1024` unsigned */
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, unsigned* workSpace);

/** FSE_countFast() :
 *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/* FSE_countFast_wksp() :
 * Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of minimum `1024` unsigned */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);

/*! FSE_count_simple
 * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
 * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). */
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which uses `minus==2` */

/* FSE_compress_wksp() :
 * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. */
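A sketch of the workspace variant, declared just below together with the FSE_WKSP_SIZE_U32() sizing macro; the limits chosen here are illustrative, not prescribed by the header:

    #define MY_MAX_TABLELOG  12     /* hypothetical caller-chosen limits */
    #define MY_MAX_SYMBOL   255
    FSE_CTable wksp[ FSE_WKSP_SIZE_U32(MY_MAX_TABLELOG, MY_MAX_SYMBOL) ];   /* ~14 KB here ; heap works too */
    size_t const cSize = FSE_compress_wksp(dst, dstCapacity, src, srcSize,
                                           MY_MAX_SYMBOL, MY_MAX_TABLELOG, wksp, sizeof(wksp));
    if (FSE_isError(cSize)) { /* handle error */ }
    /* as with FSE_compress2() : cSize == 0 => not compressible, cSize == 1 => RLE case */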
#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */

size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to compress always the same symbolValue */

/* FSE_buildCTable_wksp() :
 * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `(1<<tableLog)`. */
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

/* *** DECOMPRESSION *** */

size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */

size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */

size_t FSE_decompress_wksp (void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */

/* *****************************************
*  FSE symbol compression API
*******************************************/
/*!
   This API consists of small unitary functions, which highly benefit from being inlined.
   Hence their body is included in the next section.
*/
typedef struct {
    ptrdiff_t   value;
    const void* stateTable;
    const void* symbolTT;
    unsigned    stateLog;
} FSE_CState_t;

static void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct);
static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol);
static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr);

/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.

A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.

You will need a few variables to track your CStream. They are :

FSE_CTable    ct;         // Provided by FSE_buildCTable()
BIT_CStream_t bitStream;  // bitStream tracking structure
FSE_CState_t  state;      // State tracking structure (can have several)

The first thing to do is to init bitStream and state.
    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
    FSE_initCState(&state, ct);

Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
    FSE_encodeSymbol(&bitStream, &state, symbol);

At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
    BIT_addBits(&bitStream, bitField, nbBits);

The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
    BIT_flushBits(&bitStream);

Your last FSE encoding operation shall be to flush your last state value(s).
    FSE_flushCState(&bitStream, &state);

You must then close the bitStream if you opened it, by calling :
    size_t size = BIT_closeCStream(&bitStream);
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
*/

/* *****************************************
*  FSE symbol decompression API
*******************************************/
typedef struct {
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;

static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);

/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.

You will need a few variables to track your bitStream. They are :

BIT_DStream_t DStream;    // Stream context
FSE_DState_t  DState;     // State context. Multiple ones are possible
FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()

The first thing to do is to init the bitStream.
    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);

You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);

You can then decode your data, symbol after symbol.
For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);

You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
    size_t bitField = BIT_readBits(&DStream, nbBits);

All above operations only read from local register (which size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
    endSignal = BIT_reloadDStream(&DStream);

BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left into the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_overflow : Dstream went too far. Decompression result is corrupted.

When reaching end of buffer, decompression loop can stop when BIT_reloadDStream() >= BIT_DStream_completed
When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
    BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
    FSE_endOfDState(&DState);
*/

/* *****************************************
*  FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */

/* *****************************************
*  Implementation of inlined functions
*******************************************/
typedef struct {
    int deltaFindState;
    U32 deltaNbBits;
} FSE_symbolCompressionTransform;   /* total 8 bytes */

MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
    const void* ptr = ct;
    const U16* u16ptr = (const U16*) ptr;
    const U32 tableLog = MEM_read16(ptr);
    statePtr->value = (ptrdiff_t)1<<tableLog;
    statePtr->stateTable = u16ptr+2;
    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
    statePtr->stateLog = tableLog;
}
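Condensing that encoding walkthrough into code — a minimal sketch, flushing after every symbol for simplicity (a real encoder flushes less often); `ct`, `dst`/`dstCapacity` and `src`/`srcSize` are assumed inputs:

    BIT_CStream_t bitC;
    FSE_CState_t  state;
    size_t cSize = 0;
    if (!FSE_isError(BIT_initCStream(&bitC, dst, dstCapacity))) {
        ptrdiff_t i;
        FSE_initCState(&state, ct);                       /* ct from FSE_buildCTable() */
        for (i = (ptrdiff_t)srcSize - 1; i >= 0; i--) {   /* encode in reverse (LIFO) */
            FSE_encodeSymbol(&bitC, &state, ((const BYTE*)src)[i]);
            BIT_flushBits(&bitC);                         /* commit local register to memory */
        }
        FSE_flushCState(&bitC, &state);
        cSize = BIT_closeCStream(&bitC);                  /* 0 => dst too small */
    }

/*!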
FSE_initCState2() :
*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
*   uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
    FSE_initCState(statePtr, ct);
    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
        const U16* stateTable = (const U16*)(statePtr->stateTable);
        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
    }
}

MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
{
    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    const U16* const stateTable = (const U16*)(statePtr->stateTable);
    U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    BIT_addBits(bitC, statePtr->value, nbBitsOut);
    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}

MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
    BIT_flushBits(bitC);
}

/* ======    Decompression    ====== */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct {
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    const void* ptr = dt;
    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    return DInfo.symbol;
}

MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}
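And the matching decode loop — a minimal sketch that leaves out the tail-drain subtlety (the last few symbols once the bitStream is exhausted; see the BIT_reloadDStream() notes above); `dt`, `cSrc`/`cSrcSize` and `dst` are assumed inputs:

    BIT_DStream_t bitD;
    FSE_DState_t  state;
    BYTE* op = (BYTE*)dst;
    if (!FSE_isError(BIT_initDStream(&bitD, cSrc, cSrcSize))) {
        FSE_initDState(&state, &bitD, dt);   /* dt from FSE_buildDTable() */
        while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished)
            *op++ = FSE_decodeSymbol(&state, &bitD);   /* symbols come out in forward order */
        /* on exit, check BIT_endOfDStream(&bitD) and FSE_endOfDState(&state) */
    }

/*!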
FSE_decodeSymbolFast() :
    unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    BYTE const symbol = DInfo.symbol;
    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}

#ifndef FSE_COMMONDEFS_ONLY

/* **************************************************************
*  Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
#  define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
#  define FSE_DEFAULT_MEMORY_USAGE 13
#endif

/*!FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
#  define FSE_MAX_SYMBOL_VALUE 255
#endif

/* **************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t

#endif   /* !FSE_COMMONDEFS_ONLY */

/* ***************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif

#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)

#endif  /* FSE_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
Index: vendor/zstd/dist/lib/common/huf.h
===================================================================
--- vendor/zstd/dist/lib/common/huf.h (revision 325596)
+++ vendor/zstd/dist/lib/common/huf.h (revision 325597)
@@ -1,302 +1,302 @@
/* ******************************************************************
   Huffman coder, part of New Generation Entropy library
   header file
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

   * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif

#ifndef HUF_H_298734234
#define HUF_H_298734234

/* *** Dependencies *** */
#include <stddef.h>    /* size_t */

/* *** library symbols visibility *** */
/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
 *        HUF symbols remain "private" (internal symbols for library only).
 *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
#  define HUF_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
#else
#  define HUF_PUBLIC_API
#endif

/* *** simple functions *** */
/**
HUF_compress() :
    Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
    'dst' buffer must be already allocated.
    Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
    `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
    @return : size of compressed data (<= `dstCapacity`).
    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
                     if return == 1, srcData is a single repeated byte symbol (RLE compression).
                     if HUF_isError(return), compression failed (more details using HUF_getErrorName())
*/
HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize);
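A minimal sketch of the call and its special return values (HUF_isError() and HUF_decompress() are declared just below); `src`/`srcSize` and `dst`/`dstCapacity` are assumed inputs:

    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize)) { /* compression failed */ }
    else if (cSize == 0)    { /* not compressible : store src raw */ }
    else if (cSize == 1)    { /* RLE : src is one repeated byte */ }
    /* the decoder will need srcSize recorded somewhere :
       HUF_decompress() requires the exact original size */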
/**
HUF_decompress() :
    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
    into already allocated buffer 'dst', of minimum size 'dstSize'.
    `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
    Note : in contrast with FSE, HUF_decompress can regenerate
           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
           because it knows size to regenerate.
    @return : size of regenerated data (== originalSize),
              or an error code, which can be tested using HUF_isError()
*/
HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize);

/* *** Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024)   /**< maximum input size for a single block compressed with HUF_compress */
HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */

/* Error Management */
HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */

/* *** Advanced function *** */

/** HUF_compress2() :
 *  Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`.
 *  `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);

/** HUF_compress4X_wksp() :
 *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
 *  `workSpace` must have minimum alignment of 4, and be at least as large as following macro */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);

/**
 *  The minimum workspace size for the `workSpace` used in
 *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
 *
 *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
 *  Buffer overflow errors may potentially occur if code modifications result in
 *  a required workspace size greater than that specified in the following
 *  macro.
 */
#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

#endif   /* HUF_H_298734234 */

/* ******************************************************************
 *  WARNING !!
 *  The following section contains advanced and experimental definitions
 *  which shall never be used in the context of dll
 *  because they are not guaranteed to remain stable in the future.
 *  Only consider them in association with static linking.
 *******************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY

/* *** Dependencies *** */
#include "mem.h"   /* U32 */

/* *** Constants *** */
#define HUF_TABLELOG_MAX      12      /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT  11      /* tableLog by default, when not specified */
#define HUF_SYMBOLVALUE_MAX  255
#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
#  error "HUF_TABLELOG_MAX is too large !"
#endif /* **************************************** * Static allocation ******************************************/ /* HUF buffer bounds */ #define HUF_CTABLEBOUND 129 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of HUF's Compression Table */ #define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ #define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ void* name##hv = &(name##hb); \ HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } #define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } /* **************************************** * Advanced decompression functions ******************************************/ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ /* **************************************** * HUF detailed API ******************************************/ /*! HUF_compress() does the following: 1. count symbol occurrence from source[] into table count[] using FSE_count() 2. (optional) refine tableLog using HUF_optimalTableLog() 3. build Huffman table from count using HUF_buildCTable() 4. save Huffman table to memory buffer using HUF_writeCTable() 5. encode the data stream using HUF_compress4X_usingCTable() The following API allows targeting specific sub-functions for advanced tasks. For example, it's possible to compress several blocks using the same 'CTable', or to save and regenerate 'CTable' using external methods. 
*/

/* FSE_count() : find it within "fse.h" */

unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);

typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);

typedef enum {
   HUF_repeat_none,  /**< Cannot use the previous table */
   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
 } HUF_repeat;
/** HUF_compress4X_repeat() :
 *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. */
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);

/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is destination buffer.
    @return : size read from `src` , or an error Code .
    Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize);

/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable() */
-size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize);
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);

/*
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build Huffman table from save, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
*/
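A sketch of the detailed compression flow above (steps 1-5), reusing one table across blocks; it is not upstream code, and error checks are abbreviated. HUF_CREATE_STATIC_CTABLE and the HUF_* constants come from the static-allocation section above, FSE_count() from "fse.h":

    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
    unsigned count[HUF_SYMBOLVALUE_MAX+1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    size_t const largest = FSE_count(count, &maxSymbolValue, src, srcSize);          /* 1. histogram */
    if (!FSE_isError(largest)) {
        unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);   /* 2. */
        size_t const maxBits = HUF_buildCTable(hufTable, count, maxSymbolValue, huffLog);              /* 3. */
        if (!HUF_isError(maxBits)) {
            size_t const hSize = HUF_writeCTable(dst, dstCapacity, hufTable, maxSymbolValue, (unsigned)maxBits);   /* 4. */
            size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                                            src, srcSize, hufTable);                   /* 5. 0 => not compressible */
            (void)cSize;
        }
    }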
/** HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-determined metrics.
 *  @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
 *  Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);

size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);

size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

/* single stream variants */

size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
 *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
 *  If it uses hufTable it does not modify hufTable or repeat.
 *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
 *  If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */

size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t
HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);

#endif /* HUF_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
}
#endif
Index: vendor/zstd/dist/lib/common/mem.h
===================================================================
--- vendor/zstd/dist/lib/common/mem.h (revision 325596)
+++ vendor/zstd/dist/lib/common/mem.h (revision 325597)
@@ -1,359 +1,360 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */
#include <string.h>    /* memcpy */

/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#  include <stdlib.h>   /* _byteswap_ulong */
#  include <intrin.h>   /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

/* code only tested on 32 and 64 bits systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }

/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
  typedef  intptr_t iPtrDiff;
  typedef uintptr_t uPtrDiff;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef   signed short     S16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
  typedef   signed long long S64;
  typedef ptrdiff_t iPtrDiff;
  typedef size_t    uPtrDiff;
#endif

/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The below switch allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
* In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif defined(__INTEL_COMPILER) || defined(__GNUC__) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard, by lying on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) __pragma( pack(push, 1) ) typedef union { U16 u16; U32 u32; U64 u64; size_t st; } unalign; __pragma( pack(pop) ) #else typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; #endif MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC size_t MEM_readST(const void* memPtr) { size_t val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { memcpy(memPtr, &value, sizeof(value)); } MEM_STATIC void MEM_write64(void* memPtr, U64 value) { memcpy(memPtr, &value, sizeof(value)); } #endif /* MEM_FORCE_MEMORY_ACCESS */ MEM_STATIC U32 MEM_swap32(U32 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_ulong(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap32(in); #else return ((in << 24) & 0xff000000 ) | ((in << 8) & 0x00ff0000 ) | ((in >> 8) & 0x0000ff00 ) | ((in >> 24) & 0x000000ff ); #endif } MEM_STATIC U64 MEM_swap64(U64 in) { #if defined(_MSC_VER) /* Visual Studio */ return _byteswap_uint64(in); #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) return __builtin_bswap64(in); #else return ((in << 56) & 0xff00000000000000ULL) | ((in << 40) & 0x00ff000000000000ULL) | ((in << 24) & 0x0000ff0000000000ULL) | ((in << 8) & 0x000000ff00000000ULL) | ((in >> 8) & 0x00000000ff000000ULL) | ((in >> 24) & 0x0000000000ff0000ULL) | ((in >> 40) & 0x000000000000ff00ULL) | ((in >> 56) & 0x00000000000000ffULL); #endif } MEM_STATIC size_t MEM_swapST(size_t in) { if (MEM_32bits()) return (size_t)MEM_swap32((U32)in); else return (size_t)MEM_swap64((U64)in); } /*=== Little endian r/w ===*/ MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE24(const void* memPtr) { return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) { MEM_writeLE16(memPtr, (U16)val); ((BYTE*)memPtr)[2] = (BYTE)(val>>16); } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else return MEM_swap32(MEM_read32(memPtr)); } MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) { if (MEM_isLittleEndian()) MEM_write32(memPtr, val32); else MEM_write32(memPtr, MEM_swap32(val32)); } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else return MEM_swap64(MEM_read64(memPtr)); } MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) { if (MEM_isLittleEndian()) MEM_write64(memPtr, val64); else MEM_write64(memPtr, MEM_swap64(val64)); } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) { if (MEM_32bits()) MEM_writeLE32(memPtr, (U32)val); else MEM_writeLE64(memPtr, (U64)val); } /*=== Big endian r/w ===*/ MEM_STATIC U32 MEM_readBE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_swap32(MEM_read32(memPtr)); else return MEM_read32(memPtr); } MEM_STATIC void 
MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}

#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */
Index: vendor/zstd/dist/lib/common/pool.c
===================================================================
--- vendor/zstd/dist/lib/common/pool.c (revision 325596)
+++ vendor/zstd/dist/lib/common/pool.c (revision 325597)
@@ -1,240 +1,255 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 */

/* ======   Dependencies   ======= */
#include <stddef.h>    /* size_t */
#include <stdlib.h>    /* malloc, calloc, free */
#include "pool.h"

/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
#endif

#ifdef ZSTD_MULTITHREAD

#include "threading.h"   /* pthread adaptation */

/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
-  POOL_function function;
-  void *opaque;
+    POOL_function function;
+    void *opaque;
} POOL_job;

struct POOL_ctx_s {
+    ZSTD_customMem customMem;
    /* Keep track of the threads */
-    pthread_t *threads;
+    ZSTD_pthread_t *threads;
    size_t numThreads;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
-    pthread_mutex_t queueMutex;
+    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
-    pthread_cond_t queuePushCond;
+    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variables for poppers to wait on when the queue is empty */
-    pthread_cond_t queuePopCond;
+    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};

/* POOL_thread() :
   Work thread for the thread pool.
   Waits for jobs and executes them.
   @returns : NULL on failure else non-null.
*/ static void* POOL_thread(void* opaque) { POOL_ctx* const ctx = (POOL_ctx*)opaque; if (!ctx) { return NULL; } for (;;) { /* Lock the mutex and wait for a non-empty queue or until shutdown */ - pthread_mutex_lock(&ctx->queueMutex); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); while (ctx->queueEmpty && !ctx->shutdown) { - pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); } /* empty => shutting down: so stop */ if (ctx->queueEmpty) { - pthread_mutex_unlock(&ctx->queueMutex); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return opaque; } /* Pop a job off the queue */ { POOL_job const job = ctx->queue[ctx->queueHead]; ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; ctx->numThreadsBusy++; ctx->queueEmpty = ctx->queueHead == ctx->queueTail; /* Unlock the mutex, signal a pusher, and run the job */ - pthread_mutex_unlock(&ctx->queueMutex); - pthread_cond_signal(&ctx->queuePushCond); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + ZSTD_pthread_cond_signal(&ctx->queuePushCond); job.function(job.opaque); /* If the intended queue size was 0, signal after finishing job */ if (ctx->queueSize == 1) { - pthread_mutex_lock(&ctx->queueMutex); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; - pthread_mutex_unlock(&ctx->queueMutex); - pthread_cond_signal(&ctx->queuePushCond); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + ZSTD_pthread_cond_signal(&ctx->queuePushCond); } } } /* for (;;) */ /* Unreachable */ } -POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) { - POOL_ctx *ctx; +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { + POOL_ctx* ctx; /* Check the parameters */ if (!numThreads) { return NULL; } /* Allocate the context and zero initialize */ - ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx)); + ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem); if (!ctx) { return NULL; } /* Initialize the job queue. * It needs one extra space since one space is wasted to differentiate empty * and full queues. */ ctx->queueSize = queueSize + 1; ctx->queue = (POOL_job*) malloc(ctx->queueSize * sizeof(POOL_job)); ctx->queueHead = 0; ctx->queueTail = 0; ctx->numThreadsBusy = 0; ctx->queueEmpty = 1; - (void)pthread_mutex_init(&ctx->queueMutex, NULL); - (void)pthread_cond_init(&ctx->queuePushCond, NULL); - (void)pthread_cond_init(&ctx->queuePopCond, NULL); + (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); + (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); + (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); ctx->shutdown = 0; /* Allocate space for the thread handles */ - ctx->threads = (pthread_t*)malloc(numThreads * sizeof(pthread_t)); + ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->numThreads = 0; + ctx->customMem = customMem; /* Check for errors */ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } /* Initialize the threads */ { size_t i; for (i = 0; i < numThreads; ++i) { - if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { ctx->numThreads = i; POOL_free(ctx); return NULL; } } ctx->numThreads = numThreads; } return ctx; } /*! POOL_join() : Shutdown the queue, wake any sleeping threads, and join all of the threads. 
*/
-static void POOL_join(POOL_ctx *ctx) {
+static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
-    pthread_mutex_lock(&ctx->queueMutex);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
-    pthread_mutex_unlock(&ctx->queueMutex);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
-    pthread_cond_broadcast(&ctx->queuePushCond);
-    pthread_cond_broadcast(&ctx->queuePopCond);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->numThreads; ++i) {
-            pthread_join(ctx->threads[i], NULL);
+            ZSTD_pthread_join(ctx->threads[i], NULL);
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
-    pthread_mutex_destroy(&ctx->queueMutex);
-    pthread_cond_destroy(&ctx->queuePushCond);
-    pthread_cond_destroy(&ctx->queuePopCond);
-    if (ctx->queue) free(ctx->queue);
-    if (ctx->threads) free(ctx->threads);
-    free(ctx);
+    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+    ZSTD_free(ctx->queue, ctx->customMem);
+    ZSTD_free(ctx->threads, ctx->customMem);
+    ZSTD_free(ctx, ctx->customMem);
}

size_t POOL_sizeof(POOL_ctx *ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
        + ctx->queueSize * sizeof(POOL_job)
-        + ctx->numThreads * sizeof(pthread_t);
+        + ctx->numThreads * sizeof(ZSTD_pthread_t);
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
 * then a queue is empty if there is a thread free and no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return ctx->numThreadsBusy == ctx->numThreads ||
               !ctx->queueEmpty;
    }
}

void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
    if (!ctx) { return; }

-    pthread_mutex_lock(&ctx->queueMutex);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    {   POOL_job const job = {function, opaque};

        /* Wait until there is space in the queue for the new job */
        while (isQueueFull(ctx) && !ctx->shutdown) {
-          pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+          ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
        }
        /* The queue is still going => there is space */
        if (!ctx->shutdown) {
            ctx->queueEmpty = 0;
            ctx->queue[ctx->queueTail] = job;
            ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
        }
    }
-    pthread_mutex_unlock(&ctx->queueMutex);
-    pthread_cond_signal(&ctx->queuePopCond);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}
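A minimal sketch of the pool lifecycle and the lifetime rule documented in pool.h (`opaque` must stay valid until the job has run); it is not upstream code, and the task payload is hypothetical:

    typedef struct { unsigned long sum; } Task;               /* hypothetical payload */
    static void runTask(void* opaque) { ((Task*)opaque)->sum += 1; /* do work */ }

    void example(void) {
        POOL_ctx* const pool = POOL_create(4 /*numThreads*/, 8 /*queueSize*/);
        Task task = { 0 };                    /* outlives the job : OK */
        if (pool) {
            POOL_add(pool, &runTask, &task);  /* may block until the queue has room */
            POOL_free(pool);                  /* joins threads ; queued jobs run first */
        }
    }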
#else  /* ZSTD_MULTITHREAD  not defined */
/* No multi-threading support */

/* We don't need any data, but if it is empty malloc() might return NULL.
*/
struct POOL_ctx_s {
-  int data;
+    int dummy;
};
+static POOL_ctx g_ctx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
-    (void)numThreads;
-    (void)queueSize;
-    return (POOL_ctx*)malloc(sizeof(POOL_ctx));
+    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+    (void)numThreads;
+    (void)queueSize;
+    (void)customMem;
+    return &g_ctx;
+}
+
void POOL_free(POOL_ctx* ctx) {
-    free(ctx);
+    assert(!ctx || ctx == &g_ctx);
+    (void)ctx;
}

void POOL_add(void* ctx, POOL_function function, void* opaque) {
-  (void)ctx;
-  function(opaque);
+    (void)ctx;
+    function(opaque);
}

size_t POOL_sizeof(POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
+    assert(ctx == &g_ctx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */
Index: vendor/zstd/dist/lib/common/pool.h
===================================================================
--- vendor/zstd/dist/lib/common/pool.h (revision 325596)
+++ vendor/zstd/dist/lib/common/pool.h (revision 325597)
@@ -1,61 +1,65 @@
/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
 */

#ifndef POOL_H
#define POOL_H

#if defined (__cplusplus)
extern "C" {
#endif

#include <stddef.h>   /* size_t */
+#include "zstd_internal.h"   /* ZSTD_customMem */

typedef struct POOL_ctx_s POOL_ctx;

/*! POOL_create() :
 * Create a thread pool with at most `numThreads` threads.
 * `numThreads` must be at least 1.
 * The maximum number of queued jobs before blocking is `queueSize`.
 * @return : POOL_ctx pointer on success, else NULL.
*/
POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
+
+POOL_ctx *POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);

/*! POOL_free() :
    Free a thread pool returned by POOL_create().
*/
void POOL_free(POOL_ctx *ctx);

/*! POOL_sizeof() :
    return memory usage of pool returned by POOL_create().
*/
size_t POOL_sizeof(POOL_ctx *ctx);

/*! POOL_function :
    The function type that can be added to a thread pool.
*/
typedef void (*POOL_function)(void *);
/*! POOL_add_function :
    The function type for a generic thread pool add function.
*/
typedef void (*POOL_add_function)(void *, POOL_function, void *);

/*! POOL_add() :
    Add the job `function(opaque)` to the thread pool.
    Possibly blocks until there is room in the queue.
    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
*/
void POOL_add(void *ctx, POOL_function function, void *opaque);

#if defined (__cplusplus)
}
#endif

#endif
Index: vendor/zstd/dist/lib/common/threading.c
===================================================================
--- vendor/zstd/dist/lib/common/threading.c (revision 325596)
+++ vendor/zstd/dist/lib/common/threading.c (revision 325597)
@@ -1,79 +1,75 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
 *
 * You can contact the author at:
 * - zstdmt source repository: https://github.com/mcmilk/zstdmt
 */

/**
 * This file will hold wrapper for systems, which do not support pthreads
 */

-/* When ZSTD_MULTITHREAD is not defined, this file would become an empty translation unit.
-* Include some ISO C header code to prevent this and portably avoid related warnings.
-* (Visual C++: C4206 / GCC: -Wpedantic / Clang: -Wempty-translation-unit)
-*/
-#include <stddef.h>
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useles_symbol;
-

#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

/**
 * Windows minimalist Pthread Wrapper, based on :
 * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
 */

/* ===  Dependencies  === */
#include <process.h>
#include <errno.h>
#include "threading.h"

/* ===  Implementation  === */

static unsigned __stdcall worker(void *arg)
{
-    pthread_t* const thread = (pthread_t*) arg;
+    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
    thread->arg = thread->start_routine(thread->arg);
    return 0;
}

-int pthread_create(pthread_t* thread, const void* unused,
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
            void* (*start_routine) (void*), void* arg)
{
    (void)unused;
    thread->arg = arg;
    thread->start_routine = start_routine;
    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);

    if (!thread->handle)
        return errno;
    else
        return 0;
}

-int _pthread_join(pthread_t * thread, void **value_ptr)
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
{
    DWORD result;

-    if (!thread->handle) return 0;
+    if (!thread.handle) return 0;

-    result = WaitForSingleObject(thread->handle, INFINITE);
+    result = WaitForSingleObject(thread.handle, INFINITE);
    switch (result) {
    case WAIT_OBJECT_0:
-        if (value_ptr) *value_ptr = thread->arg;
+        if (value_ptr) *value_ptr = thread.arg;
        return 0;
    case WAIT_ABANDONED:
        return EINVAL;
    default:
        return GetLastError();
    }
}

#endif   /* ZSTD_MULTITHREAD */
Index: vendor/zstd/dist/lib/common/threading.h
===================================================================
--- vendor/zstd/dist/lib/common/threading.h (revision 325596)
+++ vendor/zstd/dist/lib/common/threading.h (revision 325597)
@@ -1,103 +1,123 @@
/**
 * Copyright (c) 2016 Tino Reichardt
 * All rights reserved.
 *
- * This source code is licensed under the BSD-style license found in the
- * LICENSE file in the root directory of this source tree. An additional grant
- * of patent rights can be found in the PATENTS file in the same directory.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
* * You can contact the author at: * - zstdmt source repository: https://github.com/mcmilk/zstdmt */ #ifndef THREADING_H_938743 #define THREADING_H_938743 #if defined (__cplusplus) extern "C" { #endif #if defined(ZSTD_MULTITHREAD) && defined(_WIN32) /** * Windows minimalist Pthread Wrapper, based on : * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html */ #ifdef WINVER # undef WINVER #endif #define WINVER 0x0600 #ifdef _WIN32_WINNT # undef _WIN32_WINNT #endif #define _WIN32_WINNT 0x0600 #ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ #include +#undef ERROR +#define ERROR(name) ZSTD_ERROR(name) + /* mutex */ -#define pthread_mutex_t CRITICAL_SECTION -#define pthread_mutex_init(a,b) (InitializeCriticalSection((a)), 0) -#define pthread_mutex_destroy(a) DeleteCriticalSection((a)) -#define pthread_mutex_lock(a) EnterCriticalSection((a)) -#define pthread_mutex_unlock(a) LeaveCriticalSection((a)) +#define ZSTD_pthread_mutex_t CRITICAL_SECTION +#define ZSTD_pthread_mutex_init(a, b) (InitializeCriticalSection((a)), 0) +#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) +#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) /* condition variable */ -#define pthread_cond_t CONDITION_VARIABLE -#define pthread_cond_init(a, b) (InitializeConditionVariable((a)), 0) -#define pthread_cond_destroy(a) /* No delete */ -#define pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) -#define pthread_cond_signal(a) WakeConditionVariable((a)) -#define pthread_cond_broadcast(a) WakeAllConditionVariable((a)) +#define ZSTD_pthread_cond_t CONDITION_VARIABLE +#define ZSTD_pthread_cond_init(a, b) (InitializeConditionVariable((a)), 0) +#define ZSTD_pthread_cond_destroy(a) /* No delete */ +#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) +#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) -/* pthread_create() and pthread_join() */ +/* ZSTD_pthread_create() and ZSTD_pthread_join() */ typedef struct { HANDLE handle; void* (*start_routine)(void*); void* arg; -} pthread_t; +} ZSTD_pthread_t; -int pthread_create(pthread_t* thread, const void* unused, +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, void* (*start_routine) (void*), void* arg); -#define pthread_join(a, b) _pthread_join(&(a), (b)) -int _pthread_join(pthread_t* thread, void** value_ptr); +int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); /** * add here more wrappers as required */ #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ /* === POSIX Systems === */ # include +#define ZSTD_pthread_mutex_t pthread_mutex_t +#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) +#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) + +#define ZSTD_pthread_cond_t pthread_cond_t +#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) +#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, 
c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) + #else /* ZSTD_MULTITHREAD not defined */ /* No multithreading support */ -#define pthread_mutex_t int /* #define rather than typedef, because sometimes pthread support is implicit, resulting in duplicated symbols */ -#define pthread_mutex_init(a,b) ((void)a, 0) -#define pthread_mutex_destroy(a) -#define pthread_mutex_lock(a) -#define pthread_mutex_unlock(a) +typedef int ZSTD_pthread_mutex_t; +#define ZSTD_pthread_mutex_init(a, b) ((void)a, 0) +#define ZSTD_pthread_mutex_destroy(a) +#define ZSTD_pthread_mutex_lock(a) +#define ZSTD_pthread_mutex_unlock(a) -#define pthread_cond_t int -#define pthread_cond_init(a,b) ((void)a, 0) -#define pthread_cond_destroy(a) -#define pthread_cond_wait(a,b) -#define pthread_cond_signal(a) -#define pthread_cond_broadcast(a) +typedef int ZSTD_pthread_cond_t; +#define ZSTD_pthread_cond_init(a, b) ((void)a, 0) +#define ZSTD_pthread_cond_destroy(a) +#define ZSTD_pthread_cond_wait(a, b) +#define ZSTD_pthread_cond_signal(a) +#define ZSTD_pthread_cond_broadcast(a) -/* do not use pthread_t */ +/* do not use ZSTD_pthread_t */ #endif /* ZSTD_MULTITHREAD */ #if defined (__cplusplus) } #endif #endif /* THREADING_H_938743 */ Index: vendor/zstd/dist/lib/common/zstd_common.c =================================================================== --- vendor/zstd/dist/lib/common/zstd_common.c (revision 325596) +++ vendor/zstd/dist/lib/common/zstd_common.c (revision 325597) @@ -1,80 +1,80 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include /* malloc, calloc, free */ #include /* memset */ #include "error_private.h" -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" +#include "zstd_internal.h" /*-**************************************** * Version ******************************************/ unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } /*-**************************************** * ZSTD Error Management ******************************************/ /*! ZSTD_isError() : * tells if a return value is an error code */ unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /*! ZSTD_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } /*! ZSTD_getError() : * convert a `size_t` function result into a proper ZSTD_errorCode enum */ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } /*! 
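/* Illustrative sketch (not part of the diff): the point of the ZSTD_pthread_*
 * layer above is that one caller compiles unchanged against Win32 critical
 * sections, POSIX pthreads, or the single-threaded no-op stubs. setReady and the
 * globals are hypothetical names. */
#include "threading.h"

static ZSTD_pthread_mutex_t g_lock;
static ZSTD_pthread_cond_t  g_cond;
static int g_ready = 0;

static void setReady(void)
{
    ZSTD_pthread_mutex_init(&g_lock, NULL);   /* expands to ((void)a, 0) without MT */
    ZSTD_pthread_cond_init(&g_cond, NULL);
    ZSTD_pthread_mutex_lock(&g_lock);
    g_ready = 1;
    ZSTD_pthread_cond_signal(&g_cond);
    ZSTD_pthread_mutex_unlock(&g_lock);
}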
ZSTD_getErrorString() : * provides error code string from enum */ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } /*=************************************************************** * Custom allocator ****************************************************************/ void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) return customMem.customAlloc(customMem.opaque, size); return malloc(size); } void* ZSTD_calloc(size_t size, ZSTD_customMem customMem) { if (customMem.customAlloc) { /* calloc implemented as malloc+memset; * not as efficient as calloc, but next best guess for custom malloc */ void* const ptr = customMem.customAlloc(customMem.opaque, size); memset(ptr, 0, size); return ptr; } return calloc(1, size); } void ZSTD_free(void* ptr, ZSTD_customMem customMem) { if (ptr!=NULL) { if (customMem.customFree) customMem.customFree(customMem.opaque, ptr); else free(ptr); } } Index: vendor/zstd/dist/lib/common/zstd_errors.h =================================================================== --- vendor/zstd/dist/lib/common/zstd_errors.h (revision 325596) +++ vendor/zstd/dist/lib/common/zstd_errors.h (revision 325597) @@ -1,81 +1,83 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_ERRORS_H_398273423 #define ZSTD_ERRORS_H_398273423 #if defined (__cplusplus) extern "C" { #endif /*===== dependency =====*/ #include /* size_t */ /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ #ifndef ZSTDERRORLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZSTDERRORLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY #endif /*-**************************************** * error codes list * note : this API is still considered unstable * and shall not be used with a dynamic library. 
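/* Illustrative sketch (not part of the diff): the ZSTD_customMem contract
 * consumed by ZSTD_malloc/ZSTD_calloc/ZSTD_free above is customAlloc(opaque, size)
 * and customFree(opaque, address), with opaque threaded through untouched. The
 * counting wrapper and its names are hypothetical. */
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"

typedef struct { size_t liveAllocs; } allocStats;

static void* countingAlloc(void* opaque, size_t size)
{
    ((allocStats*)opaque)->liveAllocs++;
    return malloc(size);
}

static void countingFree(void* opaque, void* address)
{
    ((allocStats*)opaque)->liveAllocs--;  /* ZSTD_free never forwards NULL here */
    free(address);
}

/* usage : allocStats stats = { 0 };
 *         ZSTD_customMem const cMem = { countingAlloc, countingFree, &stats };
 *         then pass cMem to e.g. POOL_create_advanced() introduced above. */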
* only static linking is allowed ******************************************/ typedef enum { ZSTD_error_no_error = 0, ZSTD_error_GENERIC = 1, ZSTD_error_prefix_unknown = 10, ZSTD_error_version_unsupported = 12, ZSTD_error_frameParameter_unsupported = 14, ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, ZSTD_error_dictionary_corrupted = 30, ZSTD_error_dictionary_wrong = 32, ZSTD_error_dictionaryCreation_failed = 34, ZSTD_error_parameter_unsupported = 40, ZSTD_error_parameter_outOfBound = 42, ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, + /* following error codes are not stable and may be removed or changed in a future version */ ZSTD_error_frameIndex_tooLarge = 100, ZSTD_error_seekableIO = 102, - ZSTD_error_maxCode = 120 /* never EVER use this value directly, it may change in future versions! Use ZSTD_isError() instead */ + ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; /*! ZSTD_getErrorCode() : convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, which can be used to compare with enum list published above */ ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ #if defined (__cplusplus) } #endif #endif /* ZSTD_ERRORS_H_398273423 */ Index: vendor/zstd/dist/lib/common/zstd_internal.h =================================================================== --- vendor/zstd/dist/lib/common/zstd_internal.h (revision 325596) +++ vendor/zstd/dist/lib/common/zstd_internal.h (revision 325597) @@ -1,335 +1,409 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
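/* Illustrative sketch (not part of the diff): intended consumption of the error
 * API above. Any size_t result is first screened with ZSTD_isError(); with static
 * linking it may then be refined into a ZSTD_ErrorCode for programmatic dispatch.
 * checkedSize is a hypothetical helper. */
#include <stdio.h>
#include "zstd.h"
#include "zstd_errors.h"

static size_t checkedSize(size_t result)
{
    if (ZSTD_isError(result)) {
        if (ZSTD_getErrorCode(result) == ZSTD_error_dstSize_tooSmall)
            fprintf(stderr, "destination too small: %s\n", ZSTD_getErrorName(result));
        return 0;
    }
    return result;
}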
*/ #ifndef ZSTD_CCOMMON_H_MODULE #define ZSTD_CCOMMON_H_MODULE /*-************************************* * Dependencies ***************************************/ #include "compiler.h" #include "mem.h" #include "error_private.h" #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" #ifndef XXH_STATIC_LINKING_ONLY # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ #endif #include "xxhash.h" /* XXH_reset, update, digest */ +#if defined (__cplusplus) +extern "C" { +#endif + + /*-************************************* * Debug ***************************************/ #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) # include #else # ifndef assert # define assert(condition) ((void)0) # endif #endif #define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; } #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2) # include /* recommended values for ZSTD_DEBUG display levels : * 1 : no display, enables assert() only * 2 : reserved for currently active debugging path * 3 : events once per object lifetime (CCtx, CDict) * 4 : events once per frame * 5 : events once per block * 6 : events once per sequence (*very* verbose) */ # define DEBUGLOG(l, ...) { \ if (l<=ZSTD_DEBUG) { \ fprintf(stderr, __FILE__ ": "); \ fprintf(stderr, __VA_ARGS__); \ fprintf(stderr, " \n"); \ } } #else # define DEBUGLOG(l, ...) {} /* disabled */ #endif /*-************************************* * shared macros ***************************************/ #undef MIN #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) #define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ #define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ /*-************************************* * Common constants ***************************************/ #define ZSTD_OPT_NUM (1<<12) #define ZSTD_REP_NUM 3 /* number of repcodes */ #define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ #define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) #define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BIT7 128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define BIT1 2 #define BIT0 1 #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 +#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */ static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; +#define ZSTD_FRAMEIDSIZE 4 +static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE; /* magic number size */ + #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ #define HufLog 12 typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; #define LONGNBSEQ 0x7F00 #define MINMATCH 3 #define Litbits 8 #define MaxLit ((1<= 3) /* GCC Intrinsic */ - return 31 - __builtin_clz(val); + return 31 - __builtin_clz(val); # else /* Software version */ - 
static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - int r; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; - return r; + static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + int r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; + return r; # endif + } } /* hidden functions */ /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /*! ZSTD_initCStream_internal() : * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. * @return : 0, or an error code */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_parameters params, unsigned long long pledgedSrcSize); + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); /*! ZSTD_compressStream_generic() : * Private use only. To be called from zstdmt_compress.c in single-thread mode. */ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode); -/*! ZSTD_getParamsFromCDict() : +/*! ZSTD_getCParamsFromCDict() : * as the name implies */ -ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict); +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); +/* ZSTD_compressBegin_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. */ +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictMode_e dictMode, + ZSTD_CCtx_params params, + unsigned long long pledgedSrcSize); +/* ZSTD_compress_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. */ +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_CCtx_params params); + typedef struct { blockType_e blockType; U32 lastBlock; U32 origSize; } blockProperties_t; /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr); +#if defined (__cplusplus) +} +#endif #endif /* ZSTD_CCOMMON_H_MODULE */ Index: vendor/zstd/dist/lib/compress/fse_compress.c =================================================================== --- vendor/zstd/dist/lib/compress/fse_compress.c (revision 325596) +++ vendor/zstd/dist/lib/compress/fse_compress.c (revision 325597) @@ -1,839 +1,841 @@ /* ****************************************************************** FSE : Finite State Entropy encoder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
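/* Illustrative sketch (not part of the diff): the error-forwarding idiom encoded
 * by the CHECK_F/CHECK_E macros in zstd_internal.h above — any failing size_t
 * sub-call short-circuits the caller with its error code. readBlockType is a
 * hypothetical wrapper around the real ZSTD_getcBlockSize(). */
#include "zstd_internal.h"

static size_t readBlockType(const void* src, size_t srcSize, blockType_e* typePtr)
{
    blockProperties_t bp;
    CHECK_F( ZSTD_getcBlockSize(src, srcSize, &bp) );  /* forwards any error upward */
    *typePtr = bp.blockType;
    return 0;
}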
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ #include "bitstream.h" #include "compiler.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #include "error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define FSE_isError ERR_isError #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). 
* wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); U32 const step = FSE_TABLESTEP(tableSize); U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; U32 highThreshold = tableSize-1; /* CTable header */ if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); tableU16[-2] = (U16) tableLog; tableU16[-1] = (U16) maxSymbolValue; /* For explanations on how to distribute symbol values over the table : * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ /* symbol start positions */ { U32 u; cumul[0] = 0; for (u=1; u<=maxSymbolValue+1; u++) { if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ cumul[u] = cumul[u-1] + 1; tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); } else { cumul[u] = cumul[u-1] + normalizedCounter[u-1]; } } cumul[maxSymbolValue+1] = tableSize+1; } /* Spread symbols */ { U32 position = 0; U32 symbol; for (symbol=0; symbol<=maxSymbolValue; symbol++) { int nbOccurences; for (nbOccurences=0; nbOccurences highThreshold) position = (position + step) & tableMask; /* Low proba area */ } } if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ } /* Build table */ { U32 u; for (u=0; u> 3) + 3; return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ } static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, unsigned writeIsSafe) { BYTE* const ostart = (BYTE*) header; BYTE* out = ostart; BYTE* const oend = ostart + headerBufferSize; int nbBits; const int tableSize = 1 << tableLog; int remaining; int threshold; U32 bitStream; int bitCount; unsigned charnum = 0; int previous0 = 0; bitStream = 0; bitCount = 0; /* Table Size */ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; bitCount += 4; /* Init */ remaining = tableSize+1; /* +1 for extra accuracy */ threshold = tableSize; nbBits = tableLog+1; while (remaining>1) { /* stops at 1 */ if (previous0) { unsigned start = charnum; while (!normalizedCounter[charnum]) charnum++; while (charnum >= start+24) { start+=24; bitStream += 0xFFFFU << bitCount; if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE) bitStream; out[1] = (BYTE)(bitStream>>8); out+=2; bitStream>>=16; } while (charnum >= start+3) { start+=3; bitStream += 3 << bitCount; bitCount += 2; } bitStream += (charnum-start) << bitCount; bitCount += 2; if (bitCount>16) { if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } { int count = normalizedCounter[charnum++]; int const max = (2*threshold-1)-remaining; remaining -= count < 0 ? -count : count; count++; /* +1 for extra accuracy */ if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
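/* Illustrative sketch (not part of the diff): the symbol-spreading step inside
 * FSE_buildCTable_wksp above relies on FSE_TABLESTEP = (size>>1)+(size>>3)+3
 * being odd, hence coprime with the power-of-two table size, so the walk visits
 * every slot exactly once before returning to 0. demoSpread just checks that
 * invariant for one size. */
#include <assert.h>

static void demoSpread(void)
{
    unsigned const tableSize = 1u << 6;   /* any power of two */
    unsigned const tableMask = tableSize - 1;
    unsigned const step = (tableSize >> 1) + (tableSize >> 3) + 3;  /* FSE_TABLESTEP */
    unsigned position = 0, visited = 0;
    do { visited++; position = (position + step) & tableMask; } while (position != 0);
    assert(visited == tableSize);         /* full cycle, no slot hit twice */
}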
[threshold+max 2*threshold[ */ bitStream += count << bitCount; bitCount += nbBits; bitCount -= (count>=1; } if (bitCount>16) { if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } /* flush remaining bitStream */ if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out+= (bitCount+7) /8; if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); return (out-ostart); } size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); } /*-************************************************************** * Counting histogram ****************************************************************/ /*! FSE_count_simple This function counts byte values within `src`, and store the histogram into table `count`. It doesn't use any additional memory. But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. For this reason, prefer using a table `count` with 256 elements. @return : count of most numerous element */ size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* const end = ip + srcSize; unsigned maxSymbolValue = *maxSymbolValuePtr; unsigned max=0; memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } while (ip max) max = count[s]; } return (size_t)max; } /* FSE_count_parallel_wksp() : * Same as FSE_count_parallel(), but using an externally provided scratch buffer. 
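/* Illustrative sketch (not part of the diff): using FSE_count_simple() above,
 * assuming its static-linking declaration in fse.h. count[] needs
 * maxSymbolValue+1 entries, so 256 is the safe choice for arbitrary bytes; a
 * return equal to srcSize means a single repeated symbol (RLE candidate). */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"

static void demoHistogram(const void* src, size_t srcSize)
{
    unsigned count[256];
    unsigned maxSymbolValue = 255;        /* in: limit ; out: largest symbol seen */
    size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, src, srcSize);
    (void)maxCount;                       /* occurrence count of the dominant byte */
}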
* `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ static size_t FSE_count_parallel_wksp( unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, unsigned checkMax, unsigned* const workSpace) { const BYTE* ip = (const BYTE*)source; const BYTE* const iend = ip+sourceSize; unsigned maxSymbolValue = *maxSymbolValuePtr; unsigned max=0; U32* const Counting1 = workSpace; U32* const Counting2 = Counting1 + 256; U32* const Counting3 = Counting2 + 256; U32* const Counting4 = Counting3 + 256; memset(Counting1, 0, 4*256*sizeof(unsigned)); /* safety checks */ if (!sourceSize) { memset(count, 0, maxSymbolValue + 1); *maxSymbolValuePtr = 0; return 0; } if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ /* by stripes of 16 bytes */ { U32 cached = MEM_read32(ip); ip += 4; while (ip < iend-15) { U32 c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; Counting2[(BYTE)(c>>8) ]++; Counting3[(BYTE)(c>>16)]++; Counting4[ c>>24 ]++; } ip-=4; } /* finish last symbols */ while (ipmaxSymbolValue; s--) { Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); } } { U32 s; for (s=0; s<=maxSymbolValue; s++) { count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; if (count[s] > max) max = count[s]; } } while (!count[maxSymbolValue]) maxSymbolValue--; *maxSymbolValuePtr = maxSymbolValue; return (size_t)max; } /* FSE_countFast_wksp() : * Same as FSE_countFast(), but using an externally provided scratch buffer. * `workSpace` size must be table of >= `1024` unsigned */ size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, unsigned* workSpace) { if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); } /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize) { unsigned tmpCounters[1024]; return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters); } /* FSE_count_wksp() : * Same as FSE_count(), but using an externally provided scratch buffer. 
* `workSpace` size must be table of >= `1024` unsigned */ size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* source, size_t sourceSize, unsigned* workSpace) { if (*maxSymbolValuePtr < 255) return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); *maxSymbolValuePtr = 255; return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); } size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) { unsigned tmpCounters[1024]; return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters); } /*-************************************************************** * FSE Compression Code ****************************************************************/ /*! FSE_sizeof_CTable() : FSE_CTable is a variable size structure which contains : `U16 tableLog;` `U16 maxSymbolValue;` `U16 nextStateNumber[1 << tableLog];` // This size is variable `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable Allocation is manual (C standard does not support variable-size structures). */ size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) { if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); } FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) { size_t size; if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); return (FSE_CTable*)malloc(size); } void FSE_freeCTable (FSE_CTable* ct) { free(ct); } /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; + assert(srcSize > 1); /* Not supported, RLE should be used instead */ return minBits; } unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) { U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; U32 tableLog = maxTableLog; U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); + assert(srcSize > 1); /* Not supported, RLE should be used instead */ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; return tableLog; } unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); } /* Secondary normalization method. To be used when primary method fails. 
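/* Illustrative worked instance (not part of the diff) of FSE_optimalTableLog()
 * above, where minus==2. For srcSize=1000, maxSymbolValue=255:
 *   maxBitsSrc = highbit32(999) - 2 = 9 - 2 = 7
 *   minBits    = min(highbit32(999)+1, highbit32(255)+2) = min(10, 9) = 9
 * so a requested tableLog of 11 is first capped to 7 for accuracy, then raised
 * back to the representability floor of 9, which already lies inside
 * [FSE_MIN_TABLELOG, FSE_MAX_TABLELOG]. */
#include <assert.h>
#include "fse.h"

static void demoTableLog(void)
{
    assert(FSE_optimalTableLog(11, 1000, 255) == 9);
}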
*/ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) { short const NOT_YET_ASSIGNED = -2; U32 s; U32 distributed = 0; U32 ToDistribute; /* Init */ U32 const lowThreshold = (U32)(total >> tableLog); U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); for (s=0; s<=maxSymbolValue; s++) { if (count[s] == 0) { norm[s]=0; continue; } if (count[s] <= lowThreshold) { norm[s] = -1; distributed++; total -= count[s]; continue; } if (count[s] <= lowOne) { norm[s] = 1; distributed++; total -= count[s]; continue; } norm[s]=NOT_YET_ASSIGNED; } ToDistribute = (1 << tableLog) - distributed; if ((total / ToDistribute) > lowOne) { /* risk of rounding to zero */ lowOne = (U32)((total * 3) / (ToDistribute * 2)); for (s=0; s<=maxSymbolValue; s++) { if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { norm[s] = 1; distributed++; total -= count[s]; continue; } } ToDistribute = (1 << tableLog) - distributed; } if (distributed == maxSymbolValue+1) { /* all values are pretty poor; probably incompressible data (should have already been detected); find max, then give all remaining points to max */ U32 maxV = 0, maxC = 0; for (s=0; s<=maxSymbolValue; s++) if (count[s] > maxC) maxV=s, maxC=count[s]; norm[maxV] += (short)ToDistribute; return 0; } if (total == 0) { /* all of the symbols were low enough for the lowOne or lowThreshold */ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) if (norm[s] > 0) ToDistribute--, norm[s]++; return 0; } { U64 const vStepLog = 62 - tableLog; U64 const mid = (1ULL << (vStepLog-1)) - 1; U64 const rStep = ((((U64)1<> vStepLog); U32 const sEnd = (U32)(end >> vStepLog); U32 const weight = sEnd - sStart; if (weight < 1) return ERROR(GENERIC); norm[s] = (short)weight; tmpTotal = end; } } } return 0; } size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t total, unsigned maxSymbolValue) { /* Sanity checks */ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ - { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; U64 const scale = 62 - tableLog; U64 const step = ((U64)1<<62) / total; /* <== here, one division ! 
*/ U64 const vStep = 1ULL<<(scale-20); int stillToDistribute = 1<> tableLog); for (s=0; s<=maxSymbolValue; s++) { if (count[s] == total) return 0; /* rle special case */ if (count[s] == 0) { normalizedCounter[s]=0; continue; } if (count[s] <= lowThreshold) { normalizedCounter[s] = -1; stillToDistribute--; } else { short proba = (short)((count[s]*step) >> scale); if (proba<8) { U64 restToBeat = vStep * rtbTable[proba]; proba += (count[s]*step) - ((U64)proba< restToBeat; } if (proba > largestP) largestP=proba, largest=s; normalizedCounter[s] = proba; stillToDistribute -= proba; } } if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { /* corner case, need another normalization method */ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); if (FSE_isError(errorCode)) return errorCode; } else normalizedCounter[largest] += (short)stillToDistribute; } #if 0 { /* Print Table (debug) */ U32 s; U32 nTotal = 0; for (s=0; s<=maxSymbolValue; s++) printf("%3i: %4i \n", s, normalizedCounter[s]); for (s=0; s<=maxSymbolValue; s++) nTotal += abs(normalizedCounter[s]); if (nTotal != (1U<>1); /* assumption : tableLog >= 1 */ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* header */ tableU16[-2] = (U16) nbBits; tableU16[-1] = (U16) maxSymbolValue; /* Build table */ for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ FSE_encodeSymbol(&bitC, &CState2, *--ip); FSE_encodeSymbol(&bitC, &CState1, *--ip); FSE_FLUSHBITS(&bitC); } /* 2 or 4 encoding per loop */ while ( ip>istart ) { FSE_encodeSymbol(&bitC, &CState2, *--ip); if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ FSE_FLUSHBITS(&bitC); FSE_encodeSymbol(&bitC, &CState1, *--ip); if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ FSE_encodeSymbol(&bitC, &CState2, *--ip); FSE_encodeSymbol(&bitC, &CState1, *--ip); } FSE_FLUSHBITS(&bitC); } FSE_flushCState(&bitC, &CState2); FSE_flushCState(&bitC, &CState1); return BIT_closeCStream(&bitC); } size_t FSE_compress_usingCTable (void* dst, size_t dstSize, const void* src, size_t srcSize, const FSE_CTable* ct) { unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); if (fast) return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); else return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); } size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e #define CHECK_F(f) { CHECK_V_F(_var_err__, f); } /* FSE_compress_wksp() : * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
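/* Illustrative sketch (not part of the diff): normalizing a small histogram with
 * FSE_normalizeCount() above. Positive entries are scaled so all slots sum to
 * 1<<tableLog, while -1 flags low-probability symbols that receive exactly one
 * table slot. The numbers are illustrative only. */
#include "fse.h"

static void demoNormalize(void)
{
    unsigned const count[4] = { 100, 50, 25, 1 };
    short norm[4];
    size_t const r = FSE_normalizeCount(norm, 6 /*tableLog*/, count,
                                        176 /*total*/, 3 /*maxSymbolValue*/);
    if (!FSE_isError(r)) {
        /* expect norm[3] == -1 : count 1 <= lowThreshold (176>>6 == 2);
         * the remaining entries share the rest of the 64 table slots */
    }
}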
* `wkspSize` size must be `(1< not compressible */ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ } tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); /* Write table description header */ { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); op += nc_err; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } /* check compressibility */ if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; return op-ostart; } typedef struct { FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; } fseWkspMax_t; size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) { fseWkspMax_t scratchBuffer; FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); } size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); } #endif /* FSE_COMMONDEFS_ONLY */ Index: vendor/zstd/dist/lib/compress/huf_compress.c =================================================================== --- vendor/zstd/dist/lib/compress/huf_compress.c (revision 325596) +++ vendor/zstd/dist/lib/compress/huf_compress.c (revision 325597) @@ -1,689 +1,690 @@ /* ****************************************************************** Huffman encoder, part of New Generation Entropy library Copyright (C) 2013-2016, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
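/* Illustrative sketch (not part of the diff): one-shot use of the FSE entry
 * points above. A return of 0 means "not compressible enough, store raw", 1 means
 * a single-symbol input better served by RLE, and errors are reported through
 * FSE_isError(). demoFseCompress is a hypothetical helper. */
#include "fse.h"

static size_t demoFseCompress(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    size_t const cSize = FSE_compress(dst, dstCapacity, src, srcSize);
    if (FSE_isError(cSize)) return 0;     /* treat errors as "store raw" here */
    return cSize;                         /* 0 = raw, 1 = RLE, else compressed */
}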
You can contact the author at : - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /* ************************************************************** * Includes ****************************************************************/ #include /* memcpy, memset */ #include /* printf (debug) */ #include "bitstream.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "fse.h" /* header compression */ #define HUF_STATIC_LINKING_ONLY #include "huf.h" #include "error_private.h" /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e #define CHECK_F(f) { CHECK_V_F(_var_err__, f); } /* ************************************************************** * Utils ****************************************************************/ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) { return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); } /* ******************************************************* * HUF : Huffman block compression *********************************************************/ /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. */ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const oend = ostart + dstSize; U32 maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; BYTE scratchBuffer[1< not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); /* Write table description header */ { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } return op-ostart; } struct HUF_CElt_s { U16 val; BYTE nbBits; }; /* typedef'd to HUF_CElt within "huf.h" */ /*! HUF_writeCTable() : `CTable` : Huffman tree to save, using huf representation. 
@return : size of saved CTable */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) { BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; BYTE* op = (BYTE*)dst; U32 n; /* check conditions */ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); /* convert to weight */ bitsToWeight[0] = 0; for (n=1; n1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; } } /* write raw values as 4-bits (max : 15) */ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ for (n=0; n HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); - if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall); + if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } } /* fill nbBits */ { U32 n; for (n=0; nn=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } /* assign value within rank, symbol order */ - { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } + { U32 n; for (n=0; n maxNbBits */ /* there are several too large elements (at least >= 2) */ { int totalCost = 0; const U32 baseCost = 1 << (largestBits - maxNbBits); U32 n = lastNonNull; while (huffNode[n].nbBits > maxNbBits) { totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); huffNode[n].nbBits = (BYTE)maxNbBits; n --; } /* n stops at huffNode[n].nbBits <= maxNbBits */ while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ /* renorm totalCost */ totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ /* repay normalized cost */ { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; int pos; /* Get pos of last (smallest) symbol per rank */ memset(rankLast, 0xF0, sizeof(rankLast)); { U32 currentNbBits = maxNbBits; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currentNbBits) continue; currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ rankLast[maxNbBits-currentNbBits] = pos; } } while (totalCost > 0) { U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { U32 highPos = rankLast[nBitsToDecrease]; U32 lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; if (lowPos == noSymbol) break; { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
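/* Illustrative sketch (not part of the diff): round-tripping a Huffman table
 * header through HUF_writeCTable()/HUF_readCTable() above. Note this revision
 * turns HUF_readCTable()'s maxSymbolValue into an in/out pointer. The U32 backing
 * array assumes the 4-byte-per-HUF_CElt sizing this file documents; buffer sizes
 * and names are illustrative. */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

static void demoCTableHeader(const HUF_CElt* ct, unsigned maxSymbolValue, unsigned huffLog)
{
    BYTE header[256];                     /* generous for a weight header */
    U32 storage[HUF_SYMBOLVALUE_MAX + 1]; /* assumed one U32 per HUF_CElt */
    HUF_CElt* const rebuilt = (HUF_CElt*)storage;
    U32 msv = HUF_SYMBOLVALUE_MAX;        /* in: capacity ; out: actual value */
    size_t const hSize = HUF_writeCTable(header, sizeof(header), ct, maxSymbolValue, huffLog);
    if (HUF_isError(hSize)) return;
    {   size_t const rSize = HUF_readCTable(rebuilt, &msv, header, hSize);
        (void)rSize;
    }
}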
*/ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) nBitsToDecrease ++; totalCost -= 1 << (nBitsToDecrease-1); if (rankLast[nBitsToDecrease-1] == noSymbol) rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ huffNode[rankLast[nBitsToDecrease]].nbBits ++; if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol; else { rankLast[nBitsToDecrease]--; if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ } } /* while (totalCost > 0) */ while (totalCost < 0) { /* Sometimes, cost correction overshoot */ if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ while (huffNode[n].nbBits == maxNbBits) n--; huffNode[n+1].nbBits--; rankLast[1] = n+1; totalCost++; continue; } huffNode[ rankLast[1] + 1 ].nbBits--; rankLast[1]++; totalCost ++; } } } /* there are several too large elements (at least >= 2) */ return maxNbBits; } typedef struct { U32 base; U32 current; } rankPos; static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) { rankPos rank[32]; U32 n; memset(rank, 0, sizeof(rank)); for (n=0; n<=maxSymbolValue; n++) { U32 r = BIT_highbit32(count[n] + 1); rank[r].base ++; } for (n=30; n>0; n--) rank[n-1].base += rank[n].base; for (n=0; n<32; n++) rank[n].current = rank[n].base; for (n=0; n<=maxSymbolValue; n++) { U32 const c = count[n]; U32 const r = BIT_highbit32(c+1) + 1; U32 pos = rank[r].current++; while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } } /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. */ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1]; size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { nodeElt* const huffNode0 = (nodeElt*)workSpace; nodeElt* const huffNode = huffNode0+1; U32 n, nonNullRank; int lowS, lowN; U16 nodeNb = STARTNODE; U32 nodeRoot; /* safety checks */ if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC); memset(huffNode0, 0, sizeof(huffNodeTable)); /* sort, decreasing order */ HUF_sort(huffNode, count, maxSymbolValue); /* init for parents */ nonNullRank = maxSymbolValue; while(huffNode[nonNullRank].count == 0) nonNullRank--; lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; nodeNb++; lowS-=2; for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ /* create parents */ while (nodeNb <= nodeRoot) { U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? 
lowS-- : lowN++; huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; huffNode[n1].parent = huffNode[n2].parent = nodeNb; nodeNb++; } /* distribute weights (unlimited tree height) */ huffNode[nodeRoot].nbBits = 0; for (n=nodeRoot-1; n>=STARTNODE; n--) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; for (n=0; n<=nonNullRank; n++) huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; /* enforce maxTableLog */ maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); /* fill result into tree (val, nbBits) */ { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ for (n=0; n<=nonNullRank; n++) nbPerRank[huffNode[n].nbBits]++; /* determine stating value per rank */ { U16 min = 0; for (n=maxNbBits; n>0; n--) { valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; min >>= 1; } } for (n=0; n<=maxSymbolValue; n++) tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ for (n=0; n<=maxSymbolValue; n++) tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ } return maxNbBits; } /** HUF_buildCTable() : * Note : count is used before tree is written, so they can safely overlap */ size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits) { huffNodeTable nodeTable; return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); } static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { nbBits += CTable[s].nbBits * count[s]; } return nbBits >> 3; } static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { int bad = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { bad |= (count[s] != 0) & (CTable[s].nbBits == 0); } return !bad; } static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) { BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } #define HUF_FLUSHBITS(s) BIT_flushBits(s) #define HUF_FLUSHBITS_1(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) #define HUF_FLUSHBITS_2(stream) \ if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; size_t n; BIT_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); if (HUF_isError(initErr)) return 0; } n = srcSize & ~3; /* join to mod 4 */ switch (srcSize & 3) { case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); HUF_FLUSHBITS_2(&bitC); /* fall-through */ case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); HUF_FLUSHBITS_1(&bitC); /* fall-through */ case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); HUF_FLUSHBITS(&bitC); /* fall-through */ case 0 : /* fall-through */ default: break; } for (; n>0; n-=4) { /* note : n&3==0 at this stage */ HUF_encodeSymbol(&bitC, ip[n- 1], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 2], 
CTable); HUF_FLUSHBITS_2(&bitC); HUF_encodeSymbol(&bitC, ip[n- 3], CTable); HUF_FLUSHBITS_1(&bitC); HUF_encodeSymbol(&bitC, ip[n- 4], CTable); HUF_FLUSHBITS(&bitC); } return BIT_closeCStream(&bitC); } size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); if (cSize==0) return 0; op += cSize; } return op-ostart; } static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, unsigned singleStream, const HUF_CElt* CTable) { size_t const cSize = singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); if (HUF_isError(cSize)) { return cSize; } if (cSize==0) { return 0; } /* uncompressible */ op += cSize; /* check compressibility */ if ((size_t)(op-ostart) >= srcSize-1) { return 0; } return op-ostart; } /* `workSpace` must a table of at least 1024 unsigned */ static size_t HUF_compress_internal ( void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, unsigned singleStream, void* workSpace, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat) { BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; U32* count; size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); HUF_CElt* CTable; size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); /* checks & inits */ if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ if (!dstSize) return 0; /* cannot fit within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; count = (U32*)workSpace; workSpace = (BYTE*)workSpace + countSize; wkspSize -= countSize; CTable = (HUF_CElt*)workSpace; workSpace = (BYTE*)workSpace + CTableSize; wkspSize -= CTableSize; /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); } /* Scan input and build symbol 
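/* Illustrative sketch (not part of the diff): the block layout produced by
 * HUF_compress4X_usingCTable() above — a 6-byte jump table holding three
 * little-endian U16 stream sizes, followed by four independently flushed
 * bitstreams over (srcSize+3)/4-byte segments; the fourth size is implied by the
 * end of the compressed block. demo4XLayout is a hypothetical reader. */
#include "mem.h"

static void demo4XLayout(const BYTE* cSrc)
{
    size_t const s1 = MEM_readLE16(cSrc);
    size_t const s2 = MEM_readLE16(cSrc + 2);
    size_t const s3 = MEM_readLE16(cSrc + 4);
    const BYTE* const stream1 = cSrc + 6;
    const BYTE* const stream2 = stream1 + s1;
    const BYTE* const stream3 = stream2 + s2;
    const BYTE* const stream4 = stream3 + s3;   /* extends to end of block */
    (void)stream4;
}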
stats */ { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ } /* Check validity of previous table */ if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { *repeat = HUF_repeat_none; } /* Heuristic : use existing table for small inputs */ if (preferRepeat && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); } /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); huffLog = (U32)maxBits; /* Zero the unused symbols so we can check it for validity */ memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); } /* Write table description header */ { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); /* Check if using the previous table will be beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); } } /* Use the new table */ if (hSize + 12ul >= srcSize) { return 0; } op += hSize; if (repeat) { *repeat = HUF_repeat_none; } if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */ } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); } size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat); } size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[1024]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); } size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, 
repeat, preferRepeat); } size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { unsigned workSpace[1024]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT); } Index: vendor/zstd/dist/lib/compress/zstd_compress.c =================================================================== --- vendor/zstd/dist/lib/compress/zstd_compress.c (revision 325596) +++ vendor/zstd/dist/lib/compress/zstd_compress.c (revision 325597) @@ -1,4203 +1,3023 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Tuning parameters ***************************************/ #ifndef ZSTD_CLEVEL_DEFAULT # define ZSTD_CLEVEL_DEFAULT 3 #endif /*-************************************* * Dependencies ***************************************/ #include <string.h> /* memset */ #include "mem.h" #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" -#include "zstd_internal.h" /* includes zstd.h */ -#include "zstdmt_compress.h" +#include "zstd_compress.h" +#include "zstd_fast.h" +#include "zstd_double_fast.h" +#include "zstd_lazy.h" +#include "zstd_opt.h" +#include "zstd_ldm.h" /*-************************************* -* Constants -***************************************/ -static const U32 g_searchStrength = 8; /* control skip over incompressible data */ -#define HASH_READ_SIZE 8 -typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; - - -/*-************************************* * Helper functions ***************************************/ size_t ZSTD_compressBound(size_t srcSize) { - size_t const lowLimit = 256 KB; - size_t const margin = (srcSize < lowLimit) ?
(lowLimit-srcSize) >> 12 : 0; /* from 64 to 0 */ - return srcSize + (srcSize >> 8) + margin; + return ZSTD_COMPRESSBOUND(srcSize); } /*-************************************* * Sequence storage ***************************************/ static void ZSTD_resetSeqStore(seqStore_t* ssPtr) { ssPtr->lit = ssPtr->litStart; ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthID = 0; } /*-************************************* * Context memory management ***************************************/ -typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; - struct ZSTD_CDict_s { void* dictBuffer; const void* dictContent; size_t dictContentSize; ZSTD_CCtx* refContext; }; /* typedef'd to ZSTD_CDict within "zstd.h" */ -struct ZSTD_CCtx_s { - const BYTE* nextSrc; /* next block here to continue on current prefix */ - const BYTE* base; /* All regular indexes relative to this position */ - const BYTE* dictBase; /* extDict indexes relative to this position */ - U32 dictLimit; /* below that point, need extDict */ - U32 lowLimit; /* below that point, no more data */ - U32 nextToUpdate; /* index from which to continue dictionary update */ - U32 nextToUpdate3; /* index from which to continue dictionary update */ - U32 hashLog3; /* dispatch table : larger == faster, more memory */ - U32 loadedDictEnd; /* index of end of dictionary */ - U32 forceWindow; /* force back-references to respect limit of 1<customMem = customMem; - cctx->compressionLevel = ZSTD_CLEVEL_DEFAULT; + cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; ZSTD_STATIC_ASSERT(zcss_init==0); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); return cctx; } ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize) { - ZSTD_CCtx* cctx = (ZSTD_CCtx*) workspace; + ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace; if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? 
*/ cctx->staticSize = workspaceSize; cctx->workSpace = (void*)(cctx+1); cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx); /* entropy space (never moves) */ if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL; - assert(((size_t)cctx->workSpace & 7) == 0); /* ensure correct alignment */ + assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace; return cctx; } size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support free on NULL */ if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */ ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL; ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL; +#ifdef ZSTD_MULTITHREAD ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; +#endif ZSTD_free(cctx, cctx->customMem); return 0; /* reserved as a potential error code in the future */ } + +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + return ZSTDMT_sizeof_CCtx(cctx->mtctx); +#else + (void) cctx; + return 0; +#endif +} + + size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) { if (cctx==NULL) return 0; /* support sizeof on NULL */ - DEBUGLOG(5, "sizeof(*cctx) : %u", (U32)sizeof(*cctx)); - DEBUGLOG(5, "workSpaceSize : %u", (U32)cctx->workSpaceSize); - DEBUGLOG(5, "streaming buffers : %u", (U32)(cctx->outBuffSize + cctx->inBuffSize)); - DEBUGLOG(5, "inner MTCTX : %u", (U32)ZSTDMT_sizeof_CCtx(cctx->mtctx)); + DEBUGLOG(3, "sizeof(*cctx) : %u", (U32)sizeof(*cctx)); + DEBUGLOG(3, "workSpaceSize (including streaming buffers): %u", (U32)cctx->workSpaceSize); + DEBUGLOG(3, "inner cdict : %u", (U32)ZSTD_sizeof_CDict(cctx->cdictLocal)); + DEBUGLOG(3, "inner MTCTX : %u", (U32)ZSTD_sizeof_mtctx(cctx)); return sizeof(*cctx) + cctx->workSpaceSize + ZSTD_sizeof_CDict(cctx->cdictLocal) - + cctx->outBuffSize + cctx->inBuffSize - + ZSTDMT_sizeof_CCtx(cctx->mtctx); + + ZSTD_sizeof_mtctx(cctx); } size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) { return ZSTD_sizeof_CCtx(zcs); /* same object */ } /* private API call, for dictBuilder only */ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx) { return cctx->appliedParams; } +#define ZSTD_CLEVEL_CUSTOM 999 -/* older variant; will be deprecated */ -size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value) +static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + ZSTD_CCtx_params params, U64 srcSizeHint, size_t dictSize) { - switch(param) - { - case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0; - ZSTD_STATIC_ASSERT(ZSTD_dm_auto==0); - ZSTD_STATIC_ASSERT(ZSTD_dm_rawContent==1); - case ZSTD_p_forceRawDict : cctx->dictMode = (ZSTD_dictMode_e)(value>0); return 0; - default: return ERROR(parameter_unsupported); - } + return (params.compressionLevel == ZSTD_CLEVEL_CUSTOM ? 
+ params.cParams : + ZSTD_getCParams(params.compressionLevel, srcSizeHint, dictSize)); } +static void ZSTD_cLevelToCCtxParams_srcSize(ZSTD_CCtx_params* params, U64 srcSize) +{ + params->cParams = ZSTD_getCParamsFromCCtxParams(*params, srcSize, 0); + params->compressionLevel = ZSTD_CLEVEL_CUSTOM; +} -#define ZSTD_CLEVEL_CUSTOM 999 static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx) { - if (cctx->compressionLevel==ZSTD_CLEVEL_CUSTOM) return; - cctx->requestedParams.cParams = ZSTD_getCParams(cctx->compressionLevel, - cctx->pledgedSrcSizePlusOne-1, 0); - cctx->compressionLevel = ZSTD_CLEVEL_CUSTOM; + ZSTD_cLevelToCCtxParams_srcSize( + &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1); } +static void ZSTD_cLevelToCCtxParams(ZSTD_CCtx_params* params) +{ + ZSTD_cLevelToCCtxParams_srcSize(params, 0); +} + +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( + ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params cctxParams; + memset(&cctxParams, 0, sizeof(cctxParams)); + cctxParams.cParams = cParams; + cctxParams.compressionLevel = ZSTD_CLEVEL_CUSTOM; + return cctxParams; +} + +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( + ZSTD_customMem customMem) +{ + ZSTD_CCtx_params* params; + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + params = (ZSTD_CCtx_params*)ZSTD_calloc( + sizeof(ZSTD_CCtx_params), customMem); + if (!params) { return NULL; } + params->customMem = customMem; + params->compressionLevel = ZSTD_CLEVEL_DEFAULT; + return params; +} + +ZSTD_CCtx_params* ZSTD_createCCtxParams(void) +{ + return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); +} + +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) +{ + if (params == NULL) { return 0; } + ZSTD_free(params, params->customMem); + return 0; +} + +size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params) +{ + return ZSTD_initCCtxParams(params, ZSTD_CLEVEL_DEFAULT); +} + +size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel) { + if (!cctxParams) { return ERROR(GENERIC); } + memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->compressionLevel = compressionLevel; + return 0; +} + +size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) +{ + if (!cctxParams) { return ERROR(GENERIC); } + CHECK_F( ZSTD_checkCParams(params.cParams) ); + memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->cParams = params.cParams; + cctxParams->fParams = params.fParams; + cctxParams->compressionLevel = ZSTD_CLEVEL_CUSTOM; + return 0; +} + +static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( + ZSTD_CCtx_params cctxParams, ZSTD_parameters params) +{ + ZSTD_CCtx_params ret = cctxParams; + ret.cParams = params.cParams; + ret.fParams = params.fParams; + ret.compressionLevel = ZSTD_CLEVEL_CUSTOM; + return ret; +} + #define CLAMPCHECK(val,min,max) { \ if (((val)<(min)) | ((val)>(max))) { \ return ERROR(parameter_outOfBound); \ } } size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); switch(param) { - case ZSTD_p_compressionLevel : - if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel(); /* cap max compression level */ + case ZSTD_p_format : + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_compressionLevel: if (value == 0) return 0; /* special value : 0 means "don't change anything" */ if (cctx->cdict) return ERROR(stage_wrong); - cctx->compressionLevel = value; - return 0; + return 
ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); - case ZSTD_p_windowLog : - DEBUGLOG(5, "setting ZSTD_p_windowLog = %u (cdict:%u)", - value, (cctx->cdict!=NULL)); + case ZSTD_p_windowLog: + case ZSTD_p_hashLog: + case ZSTD_p_chainLog: + case ZSTD_p_searchLog: + case ZSTD_p_minMatch: + case ZSTD_p_targetLength: + case ZSTD_p_compressionStrategy: if (value == 0) return 0; /* special value : 0 means "don't change anything" */ if (cctx->cdict) return ERROR(stage_wrong); + ZSTD_cLevelToCParams(cctx); /* Can optimize if srcSize is known */ + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_contentSizeFlag: + case ZSTD_p_checksumFlag: + case ZSTD_p_dictIDFlag: + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize, + * even when referencing into Dictionary content + * default : 0 when using a CDict, 1 when using a Prefix */ + cctx->loadedDictEnd = 0; + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_nbThreads: + if (value==0) return 0; + DEBUGLOG(5, " setting nbThreads : %u", value); + if (value > 1 && cctx->staticSize) { + return ERROR(parameter_unsupported); /* MT not compatible with static alloc */ + } + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_jobSize: + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_overlapSizeLog: + DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->requestedParams.nbThreads); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_enableLongDistanceMatching: + if (cctx->cdict) return ERROR(stage_wrong); + if (value != 0) { + ZSTD_cLevelToCParams(cctx); + } + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_ldmHashLog: + case ZSTD_p_ldmMinMatch: + if (value == 0) return 0; /* special value : 0 means "don't change anything" */ + if (cctx->cdict) return ERROR(stage_wrong); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_ldmBucketSizeLog: + case ZSTD_p_ldmHashEveryLog: + if (cctx->cdict) return ERROR(stage_wrong); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + default: return ERROR(parameter_unsupported); + } +} + +size_t ZSTD_CCtxParam_setParameter( + ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value) +{ + switch(param) + { + case ZSTD_p_format : + if (value > (unsigned)ZSTD_f_zstd1_magicless) + return ERROR(parameter_unsupported); + params->format = (ZSTD_format_e)value; + return 0; + + case ZSTD_p_compressionLevel : + if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel(); + if (value == 0) return 0; + params->compressionLevel = value; + return 0; + + case ZSTD_p_windowLog : + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.windowLog = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.windowLog = value; return 0; case ZSTD_p_hashLog : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.hashLog = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.hashLog = value; return 0; 
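
Taken together, the cases above show the two-level flow of the staged advanced API in this revision: ZSTD_CCtx_setParameter() validates context state (stage, attached cdict), then forwards to ZSTD_CCtxParam_setParameter(), which range-checks and records the value. A minimal caller sketch against that flow, using only entry points visible in this diff; the level, windowLog, and checksum values are illustrative assumptions, and every return is checked with ZSTD_isError():

    #include "zstd.h"

    /* Sketch: configure a compression context one parameter at a time.
     * Setting any cParam (here windowLog) first freezes the current
     * compression level into explicit cParams, as ZSTD_cLevelToCParams()
     * above does. */
    static size_t configure_cctx(ZSTD_CCtx* cctx)
    {
        size_t err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
        if (ZSTD_isError(err)) return err;
        err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_windowLog, 27);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
    }
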
case ZSTD_p_chainLog : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.chainLog = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.chainLog = value; return 0; case ZSTD_p_searchLog : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.searchLog = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.searchLog = value; return 0; case ZSTD_p_minMatch : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.searchLength = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.searchLength = value; return 0; case ZSTD_p_targetLength : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.targetLength = value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.targetLength = value; return 0; case ZSTD_p_compressionStrategy : - if (value == 0) return 0; /* special value : 0 means "don't change anything" */ - if (cctx->cdict) return ERROR(stage_wrong); + if (value == 0) return 0; CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra); - ZSTD_cLevelToCParams(cctx); - cctx->requestedParams.cParams.strategy = (ZSTD_strategy)value; + ZSTD_cLevelToCCtxParams(params); + params->cParams.strategy = (ZSTD_strategy)value; return 0; case ZSTD_p_contentSizeFlag : - DEBUGLOG(5, "set content size flag = %u", (value>0)); /* Content size written in frame header _when known_ (default:1) */ - cctx->requestedParams.fParams.contentSizeFlag = value>0; + DEBUGLOG(5, "set content size flag = %u", (value>0)); + params->fParams.contentSizeFlag = value > 0; return 0; case ZSTD_p_checksumFlag : /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ - cctx->requestedParams.fParams.checksumFlag = value>0; + params->fParams.checksumFlag = value > 0; return 0; case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ DEBUGLOG(5, "set dictIDFlag = %u", (value>0)); - cctx->requestedParams.fParams.noDictIDFlag = (value==0); + params->fParams.noDictIDFlag = (value == 0); return 0; - /* Dictionary parameters */ - case ZSTD_p_dictMode : - if (cctx->cdict) return ERROR(stage_wrong); /* must be set before loading */ - /* restrict dictionary mode, to "rawContent" or "fullDict" only */ - ZSTD_STATIC_ASSERT((U32)ZSTD_dm_fullDict > (U32)ZSTD_dm_rawContent); - if (value > (unsigned)ZSTD_dm_fullDict) - return ERROR(parameter_outOfBound); - cctx->dictMode = (ZSTD_dictMode_e)value; + case ZSTD_p_forceMaxWindow : + params->forceWindow = value > 0; return 0; - case ZSTD_p_refDictContent : - if (cctx->cdict) return ERROR(stage_wrong); /* must be set before loading */ - /* dictionary content will be referenced, instead of copied */ - 
cctx->dictContentByRef = value>0; + case ZSTD_p_nbThreads : + if (value == 0) return 0; +#ifndef ZSTD_MULTITHREAD + if (value > 1) return ERROR(parameter_unsupported); return 0; +#else + return ZSTDMT_initializeCCtxParameters(params, value); +#endif - case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize, - * even when referencing into Dictionary content - * default : 0 when using a CDict, 1 when using a Prefix */ - cctx->forceWindow = value>0; - cctx->loadedDictEnd = 0; - return 0; + case ZSTD_p_jobSize : +#ifndef ZSTD_MULTITHREAD + return ERROR(parameter_unsupported); +#else + if (params->nbThreads <= 1) return ERROR(parameter_unsupported); + return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_sectionSize, value); +#endif - case ZSTD_p_nbThreads: - if (value==0) return 0; - DEBUGLOG(5, " setting nbThreads : %u", value); + case ZSTD_p_overlapSizeLog : #ifndef ZSTD_MULTITHREAD - if (value > 1) return ERROR(parameter_unsupported); + return ERROR(parameter_unsupported); +#else + if (params->nbThreads <= 1) return ERROR(parameter_unsupported); + return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_overlapSectionLog, value); #endif - if ((value>1) && (cctx->nbThreads != value)) { - if (cctx->staticSize) /* MT not compatible with static alloc */ - return ERROR(parameter_unsupported); - ZSTDMT_freeCCtx(cctx->mtctx); - cctx->nbThreads = 1; - cctx->mtctx = ZSTDMT_createCCtx_advanced(value, cctx->customMem); - if (cctx->mtctx == NULL) return ERROR(memory_allocation); + + case ZSTD_p_enableLongDistanceMatching : + if (value != 0) { + ZSTD_cLevelToCCtxParams(params); + params->cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; } - cctx->nbThreads = value; + return ZSTD_ldm_initializeParameters(&params->ldmParams, value); + + case ZSTD_p_ldmHashLog : + if (value == 0) return 0; + CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + params->ldmParams.hashLog = value; return 0; - case ZSTD_p_jobSize: - if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported); - assert(cctx->mtctx != NULL); - return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_sectionSize, value); + case ZSTD_p_ldmMinMatch : + if (value == 0) return 0; + CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX); + params->ldmParams.minMatchLength = value; + return 0; - case ZSTD_p_overlapSizeLog: - DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->nbThreads); - if (cctx->nbThreads <= 1) return ERROR(parameter_unsupported); - assert(cctx->mtctx != NULL); - return ZSTDMT_setMTCtxParameter(cctx->mtctx, ZSTDMT_p_overlapSectionLog, value); + case ZSTD_p_ldmBucketSizeLog : + if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) { + return ERROR(parameter_outOfBound); + } + params->ldmParams.bucketSizeLog = value; + return 0; + case ZSTD_p_ldmHashEveryLog : + if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) { + return ERROR(parameter_outOfBound); + } + params->ldmParams.hashEveryLog = value; + return 0; + default: return ERROR(parameter_unsupported); } } +/** + * This function should be updated whenever ZSTD_CCtx_params is updated. + * Parameters are copied manually before the dictionary is loaded. + * The multithreading parameters jobSize and overlapSizeLog are set only if + * nbThreads > 1. + * + * Pledged srcSize is treated as unknown.
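
The comment above introduces ZSTD_CCtx_setParametersUsingCCtxParams(), which applies a whole ZSTD_CCtx_params object at once instead of one ZSTD_CCtx_setParameter() call per field. A lifecycle sketch built only from the constructors and setters added in this revision; the level and checksum choices are illustrative:

    #include "zstd.h"

    /* Sketch: build a reusable parameter object, apply it, then free it.
     * The context keeps its own copy, so the object may be freed at once. */
    static int apply_params(ZSTD_CCtx* cctx)
    {
        int ok = 0;
        ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
        if (params == NULL) return 0;
        if (   !ZSTD_isError(ZSTD_initCCtxParams(params, 3 /* level */))
            && !ZSTD_isError(ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1))
            && !ZSTD_isError(ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params)))
            ok = 1;
        ZSTD_freeCCtxParams(params);
        return ok;
    }
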
+ */ +size_t ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) +{ + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + if (cctx->cdict) return ERROR(stage_wrong); + + /* Assume the compression and frame parameters are validated */ + cctx->requestedParams.cParams = params->cParams; + cctx->requestedParams.fParams = params->fParams; + cctx->requestedParams.compressionLevel = params->compressionLevel; + + /* Set force window explicitly since it sets cctx->loadedDictEnd */ + CHECK_F( ZSTD_CCtx_setParameter( + cctx, ZSTD_p_forceMaxWindow, params->forceWindow) ); + + /* Set multithreading parameters explicitly */ + CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, params->nbThreads) ); + if (params->nbThreads > 1) { + CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_jobSize, params->jobSize) ); + CHECK_F( ZSTD_CCtx_setParameter( + cctx, ZSTD_p_overlapSizeLog, params->overlapSizeLog) ); + } + + /* Copy long distance matching parameters */ + cctx->requestedParams.ldmParams = params->ldmParams; + + /* customMem is used only for create/free params and can be ignored */ + return 0; +} + ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { - DEBUGLOG(5, " setting pledgedSrcSize to %u", (U32)pledgedSrcSize); + DEBUGLOG(4, " setting pledgedSrcSize to %u", (U32)pledgedSrcSize); if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; return 0; } -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +size_t ZSTD_CCtx_loadDictionary_advanced( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */ - DEBUGLOG(5, "load dictionary of size %u", (U32)dictSize); + DEBUGLOG(4, "load dictionary of size %u", (U32)dictSize); ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */ if (dict==NULL || dictSize==0) { /* no dictionary mode */ cctx->cdictLocal = NULL; cctx->cdict = NULL; } else { ZSTD_compressionParameters const cParams = - cctx->compressionLevel == ZSTD_CLEVEL_CUSTOM ? 
- cctx->requestedParams.cParams : - ZSTD_getCParams(cctx->compressionLevel, 0, dictSize); + ZSTD_getCParamsFromCCtxParams(cctx->requestedParams, 0, dictSize); cctx->cdictLocal = ZSTD_createCDict_advanced( dict, dictSize, - cctx->dictContentByRef, cctx->dictMode, + dictLoadMethod, dictMode, cParams, cctx->customMem); cctx->cdict = cctx->cdictLocal; if (cctx->cdictLocal == NULL) return ERROR(memory_allocation); } return 0; } +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dm_auto); +} + +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dm_auto); +} + + size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->cdict = cdict; - cctx->prefix = NULL; /* exclusive */ - cctx->prefixSize = 0; + memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */ return 0; } size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) { + return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dm_rawContent); +} + +size_t ZSTD_CCtx_refPrefix_advanced( + ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode) +{ if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); cctx->cdict = NULL; /* prefix discards any prior cdict */ - cctx->prefix = prefix; - cctx->prefixSize = prefixSize; + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictMode = dictMode; return 0; } static void ZSTD_startNewCompression(ZSTD_CCtx* cctx) { cctx->streamStage = zcss_init; cctx->pledgedSrcSizePlusOne = 0; } /*! ZSTD_CCtx_reset() : * Also dumps dictionary */ void ZSTD_CCtx_reset(ZSTD_CCtx* cctx) { ZSTD_startNewCompression(cctx); cctx->cdict = NULL; } /** ZSTD_checkCParams() : control CParam values remain within authorized range. @return : 0, or an error code if one value is beyond authorized range */ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) { CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) return ERROR(parameter_unsupported); return 0; } /** ZSTD_clampCParams() : * make CParam values within valid range. 
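
ZSTD_checkCParams() above rejects out-of-range values, while ZSTD_clampCParams() below silently coerces them; which to use depends on whether the caller prefers an error or a best effort. A usage sketch for the checking side, where the fallback preset (level 3, unknown srcSize, no dictionary) is this example's assumption rather than library policy:

    #include "zstd.h"

    /* Sketch: validate caller-supplied cParams, falling back to a preset
     * instead of propagating the error. */
    static ZSTD_compressionParameters
    sanitize_cparams(ZSTD_compressionParameters cParams)
    {
        if (ZSTD_isError(ZSTD_checkCParams(cParams)))
            cParams = ZSTD_getCParams(3, 0, 0);   /* assumed fallback */
        return cParams;
    }
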
* @return : valid CParams */ static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { # define CLAMP(val,min,max) { \ if (val<min) val=min; \ else if (val>max) val=max; \ } CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); CLAMP(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra; return cParams; } /** ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) { U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); return hashLog - btScale; } /** ZSTD_adjustCParams_internal() : optimize `cPar` for a given input (`srcSize` and `dictSize`). - mostly downsizing to reduce memory consumption and initialization. - Both `srcSize` and `dictSize` are optional (use 0 if unknown), - but if both are 0, no optimization can be done. - Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ + mostly downsizing to reduce memory consumption and initialization latency. + Both `srcSize` and `dictSize` are optional (use 0 if unknown). + Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */ ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { + static const U64 minSrcSize = 513; /* (1<<9) + 1 */ + static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); assert(ZSTD_checkCParams(cPar)==0); - if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ - /* resize params, to use less memory when necessary */ - { U32 const minSrcSize = (srcSize==0) ? 500 : 0; - U64 const rSize = srcSize + dictSize + minSrcSize; - if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) { - U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); - if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; - } } + if (dictSize && (srcSize+1<2) /* srcSize unknown */ ) + srcSize = minSrcSize; /* presumed small when there is a dictionary */ + else if (srcSize == 0) + srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */ + + /* resize windowLog if input is small enough, to use less memory */ + if ( (srcSize < maxWindowResize) + && (dictSize < maxWindowResize) ) { + U32 const tSize = (U32)(srcSize + dictSize); + static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; + U32 const srcLog = (tSize < hashSizeMin) ?
ZSTD_HASHLOG_MIN : + ZSTD_highbit32(tSize-1) + 1; + if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; + } if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); - if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); + if (cycleLog > cPar.windowLog) + cPar.chainLog -= (cycleLog - cPar.windowLog); } - if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) + cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ return cPar; } ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) { cPar = ZSTD_clampCParams(cPar); return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); } - -size_t ZSTD_estimateCCtxSize_advanced(ZSTD_compressionParameters cParams) +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); - U32 const divider = (cParams.searchLength==3) ? 3 : 4; - size_t const maxNbSeq = blockSize / divider; - size_t const tokenSpace = blockSize + 11*maxNbSeq; + /* Estimate CCtx size is supported for single-threaded compression only. */ + if (params->nbThreads > 1) { return ERROR(GENERIC); } + { ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(*params, 0, 0); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + U32 const divider = (cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + size_t const chainSize = + (cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams.chainLog); + size_t const hSize = ((size_t)1) << cParams.hashLog; + U32 const hashLog3 = (cParams.searchLength>3) ? + 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const entropySpace = sizeof(ZSTD_entropyCTables_t); + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); - size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog); - size_t const hSize = ((size_t)1) << cParams.hashLog; - U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); - size_t const h3Size = ((size_t)1) << hashLog3; - size_t const entropySpace = sizeof(ZSTD_entropyCTables_t); - size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + size_t const optBudget = + ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); + size_t const optSpace = ((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btultra)) ? optBudget : 0; + size_t const ldmSpace = params->ldmParams.enableLdm ?
+ ZSTD_ldm_getTableSize(params->ldmParams.hashLog, + params->ldmParams.bucketSizeLog) : 0; - DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx)); - DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace); - return sizeof(ZSTD_CCtx) + neededSpace; + size_t const neededSpace = entropySpace + tableSpace + tokenSpace + + optSpace + ldmSpace; + + DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx)); + DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace); + return sizeof(ZSTD_CCtx) + neededSpace; + } } +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCCtxSize_usingCCtxParams(&params); +} + size_t ZSTD_estimateCCtxSize(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); - return ZSTD_estimateCCtxSize_advanced(cParams); + return ZSTD_estimateCCtxSize_usingCParams(cParams); } -size_t ZSTD_estimateCStreamSize_advanced(ZSTD_compressionParameters cParams) +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) { - size_t const CCtxSize = ZSTD_estimateCCtxSize_advanced(cParams); - size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); - size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize; - size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; - size_t const streamingSize = inBuffSize + outBuffSize; + if (params->nbThreads > 1) { return ERROR(GENERIC); } + { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog); + size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize; + size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; + size_t const streamingSize = inBuffSize + outBuffSize; - return CCtxSize + streamingSize; + return CCtxSize + streamingSize; + } } +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCStreamSize_usingCCtxParams(&params); +} + size_t ZSTD_estimateCStreamSize(int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); - return ZSTD_estimateCStreamSize_advanced(cParams); + return ZSTD_estimateCStreamSize_usingCParams(cParams); } -static U32 ZSTD_equivalentParams(ZSTD_compressionParameters cParams1, - ZSTD_compressionParameters cParams2) +static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1, + ZSTD_compressionParameters cParams2) { U32 bslog1 = MIN(cParams1.windowLog, ZSTD_BLOCKSIZELOG_MAX); U32 bslog2 = MIN(cParams2.windowLog, ZSTD_BLOCKSIZELOG_MAX); return (bslog1 == bslog2) /* same block size */ & (cParams1.hashLog == cParams2.hashLog) & (cParams1.chainLog == cParams2.chainLog) & (cParams1.strategy == cParams2.strategy) /* opt parser space */ & ((cParams1.searchLength==3) == (cParams2.searchLength==3)); /* hashlog3 space */ } +/** The parameters are equivalent if ldm is not enabled in both sets or + * all the parameters are equivalent.
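
The estimators above exist so that a caller can pre-size a single buffer and never let the library allocate; they pair with ZSTD_initStaticCCtx() earlier in this file. A sketch of that pairing, in which malloc() merely stands in for any suitably aligned, long-lived arena:

    #include "zstd.h"
    #include <stdlib.h>

    /* Sketch: size a workspace with the estimator, then carve a CCtx out
     * of it. The arena must be 8-byte aligned (malloc guarantees this)
     * and must outlive the returned context. */
    static ZSTD_CCtx* make_fixed_cctx(int level, void** arenaOut)
    {
        size_t const need = ZSTD_estimateCCtxSize(level);
        void* arena = malloc(need);
        ZSTD_CCtx* const cctx = arena ? ZSTD_initStaticCCtx(arena, need) : NULL;
        if (cctx == NULL) { free(arena); arena = NULL; }
        *arenaOut = arena;   /* caller frees this after the cctx is done */
        return cctx;
    }
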
*/ +static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1, + ldmParams_t ldmParams2) +{ + return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) || + (ldmParams1.enableLdm == ldmParams2.enableLdm && + ldmParams1.hashLog == ldmParams2.hashLog && + ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog && + ldmParams1.minMatchLength == ldmParams2.minMatchLength && + ldmParams1.hashEveryLog == ldmParams2.hashEveryLog); +} + +/** Equivalence for resetCCtx purposes */ +static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1, + ZSTD_CCtx_params params2) +{ + return ZSTD_equivalentCParams(params1.cParams, params2.cParams) && + ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams); +} + /*! ZSTD_continueCCtx() : * reuse CCtx without reset (note : requires no dictionary) */ -static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 pledgedSrcSize) +static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize) { U32 const end = (U32)(cctx->nextSrc - cctx->base); - DEBUGLOG(5, "continue mode"); + DEBUGLOG(4, "continue mode"); cctx->appliedParams = params; cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; cctx->consumedSrcSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) cctx->appliedParams.fParams.contentSizeFlag = 0; - DEBUGLOG(5, "pledged content size : %u ; flag : %u", + DEBUGLOG(4, "pledged content size : %u ; flag : %u", (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag); cctx->lowLimit = end; cctx->dictLimit = end; cctx->nextToUpdate = end+1; cctx->stage = ZSTDcs_init; cctx->dictID = 0; cctx->loadedDictEnd = 0; { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = repStartValue[i]; } cctx->optState.litLengthSum = 0; /* force reset of btopt stats */ XXH64_reset(&cctx->xxhState, 0); return 0; } typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e; typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e; /*! ZSTD_resetCCtx_internal() : note : `params` are assumed fully validated at this stage */ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, - ZSTD_parameters params, U64 pledgedSrcSize, + ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_compResetPolicy_e const crp, ZSTD_buffered_policy_e const zbuff) { + DEBUGLOG(4, "ZSTD_resetCCtx_internal"); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + DEBUGLOG(4, "pledgedSrcSize: %u", (U32)pledgedSrcSize); if (crp == ZSTDcrp_continue) { - if (ZSTD_equivalentParams(params.cParams, zc->appliedParams.cParams)) { - DEBUGLOG(5, "ZSTD_equivalentParams()==1"); + if (ZSTD_equivalentParams(params, zc->appliedParams)) { + DEBUGLOG(4, "ZSTD_equivalentParams()==1"); + assert(!(params.ldmParams.enableLdm && + params.ldmParams.hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET)); zc->entropy->hufCTable_repeatMode = HUF_repeat_none; zc->entropy->offcode_repeatMode = FSE_repeat_none; zc->entropy->matchlength_repeatMode = FSE_repeat_none; zc->entropy->litlength_repeatMode = FSE_repeat_none; return ZSTD_continueCCtx(zc, params, pledgedSrcSize); } } + if (params.ldmParams.enableLdm) { + /* Adjust long distance matching parameters */ + ZSTD_ldm_adjustParameters(&params.ldmParams, params.cParams.windowLog); + assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); + assert(params.ldmParams.hashEveryLog < 32); + zc->ldmState.hashPower = + ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength); + } + { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params.cParams.windowLog); U32 const divider = (params.cParams.searchLength==3) ?
3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = blockSize + 11*maxNbSeq; size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? - 0 : (1 << params.cParams.chainLog); + 0 : ((size_t)1 << params.cParams.chainLog); size_t const hSize = ((size_t)1) << params.cParams.hashLog; U32 const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); size_t const h3Size = ((size_t)1) << hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; size_t const buffInSize = (zbuff==ZSTDb_buffered) ? ((size_t)1 << params.cParams.windowLog) + blockSize : 0; void* ptr; /* Check if workSpace is large enough, alloc a new one if needed */ { size_t const entropySpace = sizeof(ZSTD_entropyCTables_t); size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t)); size_t const optSpace = ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) ? optPotentialSpace : 0; size_t const bufferSpace = buffInSize + buffOutSize; + size_t const ldmSpace = params.ldmParams.enableLdm ? ZSTD_ldm_getTableSize(params.ldmParams.hashLog, params.ldmParams.bucketSizeLog) : 0; - size_t const neededSpace = entropySpace + optSpace + tableSpace + tokenSpace + bufferSpace; + size_t const neededSpace = entropySpace + optSpace + ldmSpace + tableSpace + tokenSpace + bufferSpace; - if (zc->workSpaceSize < neededSpace) { /* too small : resize /*/ + if (zc->workSpaceSize < neededSpace) { /* too small : resize */ DEBUGLOG(5, "Need to update workSpaceSize from %uK to %uK \n", (unsigned)zc->workSpaceSize>>10, (unsigned)neededSpace>>10); /* static cctx : no resize, error out */ if (zc->staticSize) return ERROR(memory_allocation); zc->workSpaceSize = 0; ZSTD_free(zc->workSpace, zc->customMem); zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); if (zc->workSpace == NULL) return ERROR(memory_allocation); zc->workSpaceSize = neededSpace; ptr = zc->workSpace; /* entropy space */ assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */ assert(zc->workSpaceSize >= sizeof(ZSTD_entropyCTables_t)); zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace; } } /* init params */ zc->appliedParams = params; zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) zc->appliedParams.fParams.contentSizeFlag = 0; DEBUGLOG(5, "pledged content size : %u ; flag : %u", (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); zc->blockSize = blockSize; XXH64_reset(&zc->xxhState, 0); zc->stage = ZSTDcs_init; zc->dictID = 0; zc->loadedDictEnd = 0; zc->entropy->hufCTable_repeatMode = HUF_repeat_none; zc->entropy->offcode_repeatMode = FSE_repeat_none; zc->entropy->matchlength_repeatMode = FSE_repeat_none; zc->entropy->litlength_repeatMode = FSE_repeat_none; zc->nextToUpdate = 1; zc->nextSrc = NULL; zc->base = NULL; zc->dictBase = NULL; zc->dictLimit = 0; zc->lowLimit = 0; { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->seqStore.rep[i] = repStartValue[i]; } zc->hashLog3 = hashLog3; zc->optState.litLengthSum = 0; ptr = zc->entropy + 1; /* opt parser space */ if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) { DEBUGLOG(5, "reserving optimal parser space"); assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ zc->optState.litFreq = (U32*)ptr; zc->optState.litLengthFreq = zc->optState.litFreq + (1<<Litbits); zc->optState.matchLengthFreq = zc->optState.litLengthFreq + (MaxLL+1); zc->optState.offCodeFreq = zc->optState.matchLengthFreq + (MaxML+1); ptr = zc->optState.offCodeFreq + (MaxOff+1); zc->optState.matchTable = (ZSTD_match_t*)ptr; ptr = zc->optState.matchTable + ZSTD_OPT_NUM+1; zc->optState.priceTable = (ZSTD_optimal_t*)ptr; ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1; } + /* ldm hash table */ + /* initialize bucketOffsets table later for pointer alignment */ + if (params.ldmParams.enableLdm) { + size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; +
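
ZSTD_resetCCtx_internal() sizes the workspace above and then carves it by bumping a single pointer in a fixed order: entropy tables first (they never move), optional opt-parser arrays, the LDM hash table, the match-finder tables, sequence storage, LDM bucket offsets, and finally the streaming buffers. A reduced illustration of that pointer-bump pattern; the struct, names, and size arguments here are hypothetical stand-ins, not the library's actual layout:

    #include <stddef.h>

    typedef struct { void* entropy; void* ldmHash; void* tables; void* seqStore; void* buffers; } WkspMap;

    /* Hypothetical sketch of carving one flat allocation into regions. */
    static WkspMap carve_workspace(void* wksp, size_t entropyBytes,
                                   size_t ldmBytes, size_t tableBytes,
                                   size_t seqBytes)
    {
        WkspMap m;
        char* ptr = (char*)wksp;
        m.entropy  = ptr; ptr += entropyBytes;  /* fixed at the front */
        m.ldmHash  = ptr; ptr += ldmBytes;      /* zeroed when LDM is enabled */
        m.tables   = ptr; ptr += tableBytes;    /* hash + chain + hash3 */
        m.seqStore = ptr; ptr += seqBytes;      /* sequence arrays, then literals */
        m.buffers  = ptr;                       /* streaming in/out buffers last */
        return m;
    }
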
memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t)); + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + zc->ldmState.hashTable = (ldmEntry_t*)ptr; + ptr = zc->ldmState.hashTable + ldmHSize; + } + /* table Space */ if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */ assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ zc->hashTable = (U32*)(ptr); zc->chainTable = zc->hashTable + hSize; zc->hashTable3 = zc->chainTable + chainSize; ptr = zc->hashTable3 + h3Size; /* sequences storage */ zc->seqStore.sequencesStart = (seqDef*)ptr; ptr = zc->seqStore.sequencesStart + maxNbSeq; zc->seqStore.llCode = (BYTE*) ptr; zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; ptr = zc->seqStore.litStart + blockSize; + /* ldm bucketOffsets table */ + if (params.ldmParams.enableLdm) { + size_t const ldmBucketSize = + ((size_t)1) << (params.ldmParams.hashLog - + params.ldmParams.bucketSizeLog); + memset(ptr, 0, ldmBucketSize); + zc->ldmState.bucketOffsets = (BYTE*)ptr; + ptr = zc->ldmState.bucketOffsets + ldmBucketSize; + } + /* buffers */ zc->inBuffSize = buffInSize; zc->inBuff = (char*)ptr; zc->outBuffSize = buffOutSize; zc->outBuff = zc->inBuff + buffInSize; return 0; } } /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. * Note : only works with regular variant; * do not use with extDict variant ! */ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = 0; } /*! ZSTD_copyCCtx_internal() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * The "context", in this case, refers to the hash and chain tables, entropy + * tables, and dictionary offsets. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * pledgedSrcSize=0 means "empty" if fParams.contentSizeFlag=1 * @return : 0, or an error code */ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(5, "ZSTD_copyCCtx_internal"); if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); - { ZSTD_parameters params = srcCCtx->appliedParams; + { ZSTD_CCtx_params params = dstCCtx->requestedParams; + /* Copy only compression parameters related to tables. */ + params.cParams = srcCCtx->appliedParams.cParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset, zbuff); } /* copy tables */ - { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->appliedParams.cParams.chainLog); + { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ?
0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); assert((U32*)dstCCtx->chainTable == (U32*)dstCCtx->hashTable + hSize); /* chainTable must follow hashTable */ assert((U32*)dstCCtx->hashTable3 == (U32*)dstCCtx->chainTable + chainSize); memcpy(dstCCtx->hashTable, srcCCtx->hashTable, tableSpace); /* presumes all tables follow each other */ } /* copy dictionary offsets */ dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3; dstCCtx->nextSrc = srcCCtx->nextSrc; dstCCtx->base = srcCCtx->base; dstCCtx->dictBase = srcCCtx->dictBase; dstCCtx->dictLimit = srcCCtx->dictLimit; dstCCtx->lowLimit = srcCCtx->lowLimit; dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd; dstCCtx->dictID = srcCCtx->dictID; /* copy entropy tables */ memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t)); return 0; } /*! ZSTD_copyCCtx() : * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). * pledgedSrcSize==0 means "unknown". * @return : 0, or an error code */ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) { ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0); ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); fParams.contentSizeFlag = pledgedSrcSize>0; return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff); } /*! ZSTD_reduceTable() : * reduce table indexes by `reducerValue` */ static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue) { U32 u; for (u=0 ; u < size ; u++) { if (table[u] < reducerValue) table[u] = 0; else table[u] -= reducerValue; } } +/*! ZSTD_ldm_reduceTable() : + * reduce table indexes by `reducerValue` */ +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, + U32 const reducerValue) +{ + U32 u; + for (u = 0; u < size; u++) { + if (table[u].offset < reducerValue) table[u].offset = 0; + else table[u].offset -= reducerValue; + } +} + /*! ZSTD_reduceIndex() : * rescale all indexes to avoid future overflow (indexes are U32) */ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) { - { U32 const hSize = 1 << zc->appliedParams.cParams.hashLog; + { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog; ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); } - { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->appliedParams.cParams.chainLog); + { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((U32)1 << zc->appliedParams.cParams.chainLog); ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); } - { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0; + { U32 const h3Size = (zc->hashLog3) ? 
(U32)1 << zc->hashLog3 : 0; ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); } + + { if (zc->appliedParams.ldmParams.enableLdm) { + U32 const ldmHSize = (U32)1 << zc->appliedParams.ldmParams.hashLog; + ZSTD_ldm_reduceTable(zc->ldmState.hashTable, ldmHSize, reducerValue); + } + } } /*-******************************************************* * Block entropic compression *********************************************************/ /* See doc/zstd_compression_format.md for detailed format description */ size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); return ZSTD_blockHeaderSize+srcSize; } static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } memcpy(ostart + flSize, src, srcSize); return srcSize + flSize; } static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) { BYTE* const ostart = (BYTE* const)dst; U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ switch(flSize) { case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); break; case 2: /* 2 - 2 - 12 */ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); break; case 3: /* 2 - 2 - 20 */ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); break; default: /* not necessary : flSize is {1,2,3} */ assert(0); } ostart[flSize] = *(const BYTE*)src; return flSize+1; } static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy, ZSTD_strategy strategy, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const minGain = ZSTD_minGain(srcSize); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; symbolEncodingType_e hType = set_compressed; size_t cLitSize; /* small ? don't even attempt compression (speed opt) */ # define LITERAL_NOENTROPY 63 { size_t const minLitSize = entropy->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ { HUF_repeat repeat = entropy->hufCTable_repeatMode; int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; cLitSize = singleStream ? 
HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat) : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat); if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */ else { entropy->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */ } if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { entropy->hufCTable_repeatMode = HUF_repeat_none; return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (cLitSize==1) { entropy->hufCTable_repeatMode = HUF_repeat_none; return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); } /* Build header */ switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } case 5: /* 2 - 2 - 18 - 18 */ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; } default: /* not possible : lhSize is {3,4,5} */ assert(0); } return lhSize+cLitSize; } -static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24 }; -static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; - - void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) { BYTE const LL_deltaCode = 19; BYTE const ML_deltaCode = 36; const seqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; BYTE* const ofCodeTable = seqStorePtr->ofCode; BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; for (u=0; u<nbSeq; u++) { U32 const llv = sequences[u].litLength; U32 const mlv = sequences[u].matchLength; llCodeTable[u] = (llv> 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); mlCodeTable[u] = (mlv>127) ?
(BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; } if (seqStorePtr->longLengthID==1) llCodeTable[seqStorePtr->longLengthPos] = MaxLL; if (seqStorePtr->longLengthID==2) mlCodeTable[seqStorePtr->longLengthPos] = MaxML; } -MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(FSE_repeat* repeatMode, - size_t const mostFrequent, size_t nbSeq, U32 defaultNormLog) +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1 +} ZSTD_defaultPolicy_e; + +MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType( + FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq, + U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed) { #define MIN_SEQ_FOR_DYNAMIC_FSE 64 #define MAX_SEQ_FOR_STATIC_FSE 1000 - - if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); + if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) { + /* Prefer set_basic over set_rle when there are 2 or less symbols, + * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. + * If basic encoding isn't possible, always choose RLE. + */ *repeatMode = FSE_repeat_check; return set_rle; } - if ((*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + if (isDefaultAllowed && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { return set_repeat; } - if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) { + if (isDefaultAllowed && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1))))) { *repeatMode = FSE_repeat_valid; return set_basic; } *repeatMode = FSE_repeat_check; return set_compressed; } MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, FSE_CTable* CTable, U32 FSELog, symbolEncodingType_e type, U32* count, U32 max, BYTE const* codeTable, size_t nbSeq, S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax, void* workspace, size_t workspaceSize) { BYTE* op = (BYTE*)dst; BYTE const* const oend = op + dstCapacity; switch (type) { case set_rle: *op = codeTable[0]; CHECK_F(FSE_buildCTable_rle(CTable, (BYTE)max)); return 1; case set_repeat: return 0; case set_basic: CHECK_F(FSE_buildCTable_wksp(CTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); return 0; case set_compressed: { S16 norm[MaxSeq + 1]; size_t nbSeq_1 = nbSeq; const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); if (count[codeTable[nbSeq-1]] > 1) { count[codeTable[nbSeq-1]]--; nbSeq_1--; } + assert(nbSeq_1 > 1); CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max)); { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ if (FSE_isError(NCountSize)) return NCountSize; CHECK_F(FSE_buildCTable_wksp(CTable, norm, max, tableLog, workspace, workspaceSize)); return NCountSize; } } default: return assert(0), ERROR(GENERIC); } } MEM_STATIC size_t ZSTD_encodeSequences(void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, seqDef const* sequences, size_t nbSeq, int longOffsets) { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; FSE_CState_t stateOffsetBits; FSE_CState_t stateLitLength; CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */ /* first symbols */ FSE_initCState2(&stateMatchLength, CTable_MatchLength, 
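/* sequences are encoded in reverse order, starting from the last one, because the FSE bitstream is written forward but read backward by the decoder */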
mlCodeTable[nbSeq-1]); FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); if (longOffsets) { U32 const ofBits = ofCodeTable[nbSeq-1]; int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); BIT_flushBits(&blockStream); } BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, ofBits - extraBits); } else { BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); } BIT_flushBits(&blockStream); { size_t n; for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ BYTE const llCode = llCodeTable[n]; BYTE const ofCode = ofCodeTable[n]; BYTE const mlCode = mlCodeTable[n]; U32 const llBits = LL_bits[llCode]; U32 const ofBits = ofCode; U32 const mlBits = ML_bits[mlCode]; FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog))) BIT_flushBits(&blockStream); /* (7)*/ BIT_addBits(&blockStream, sequences[n].litLength, llBits); if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); - if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ + if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); if (longOffsets) { int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { BIT_addBits(&blockStream, sequences[n].offset, extraBits); BIT_flushBits(&blockStream); /* (7)*/ } BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ } else { BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ } BIT_flushBits(&blockStream); /* (7)*/ } } FSE_flushCState(&blockStream, &stateMatchLength); FSE_flushCState(&blockStream, &stateOffsetBits); FSE_flushCState(&blockStream, &stateLitLength); { size_t const streamSize = BIT_closeCStream(&blockStream); if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ return streamSize; } } MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, ZSTD_entropyCTables_t* entropy, ZSTD_compressionParameters const* cParams, void* dst, size_t dstCapacity) { const int longOffsets = cParams->windowLog > STREAM_ACCUMULATOR_MIN; U32 count[MaxSeq+1]; FSE_CTable* CTable_LitLength = entropy->litlengthCTable; FSE_CTable* CTable_OffsetBits = entropy->offcodeCTable; FSE_CTable* CTable_MatchLength = entropy->matchlengthCTable; U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ const seqDef* const sequences = seqStorePtr->sequencesStart; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* seqHead; ZSTD_STATIC_ASSERT(sizeof(entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog))); /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; size_t const litSize = seqStorePtr->lit - literals; size_t const cSize = ZSTD_compressLiterals( entropy, cParams->strategy, op, dstCapacity, literals, litSize); if (ZSTD_isError(cSize)) return cSize; op += cSize; } /* Sequences Header */ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0)
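/* no sequences : the block carries literals only, and its header is already complete */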
return op - ostart; /* seqHead : flags for FSE encoding type */ seqHead = op++; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* CTable for Literal Lengths */ { U32 max = MaxLL; size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace); - LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog); + LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed); { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, entropy->workspace, sizeof(entropy->workspace)); if (ZSTD_isError(countSize)) return countSize; op += countSize; } } /* CTable for Offsets */ { U32 max = MaxOff; size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace); - Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog); + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_defaultPolicy_e const defaultPolicy = max <= DefaultMaxOff ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy); { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, - count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, MaxOff, + count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, entropy->workspace, sizeof(entropy->workspace)); if (ZSTD_isError(countSize)) return countSize; op += countSize; } } /* CTable for MatchLengths */ { U32 max = MaxML; size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace); - MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog); + MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed); { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, entropy->workspace, sizeof(entropy->workspace)); if (ZSTD_isError(countSize)) return countSize; op += countSize; } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const streamSize = ZSTD_encodeSequences(op, oend - op, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets); if (ZSTD_isError(streamSize)) return streamSize; op += streamSize; } return op - ostart; } MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr, ZSTD_entropyCTables_t* entropy, ZSTD_compressionParameters const* cParams, void* dst, size_t dstCapacity, size_t srcSize) { size_t const cSize = ZSTD_compressSequences_internal(seqStorePtr, entropy, cParams, dst, dstCapacity); size_t const minGain = ZSTD_minGain(srcSize); size_t const maxCSize = srcSize - minGain; /* If the srcSize <= dstCapacity, then there is enough space to write a * raw uncompressed block. Since we ran out of space, the block must not * be compressible, so fall back to a raw uncompressed block. 
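 * Returning 0 below signals the caller to emit the raw block itself.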
*/ int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity; if (ZSTD_isError(cSize) && !uncompressibleError) return cSize; /* Check compressibility */ if (cSize >= maxCSize || uncompressibleError) { entropy->hufCTable_repeatMode = HUF_repeat_none; entropy->offcode_repeatMode = FSE_repeat_none; entropy->matchlength_repeatMode = FSE_repeat_none; entropy->litlength_repeatMode = FSE_repeat_none; return 0; } assert(!ZSTD_isError(cSize)); /* confirm repcodes */ { int i; for (i=0; i<ZSTD_REP_NUM; ++i) seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; } return cSize; } - -/*! ZSTD_storeSeq() : - Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. - `offsetCode` : distance to match, or 0 == repCode. - `matchCode` : matchLength - MINMATCH -*/ -MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) -{ -#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6) - static const BYTE* g_start = NULL; - U32 const pos = (U32)((const BYTE*)literals - g_start); - if (g_start==NULL) g_start = (const BYTE*)literals; - if ((pos > 0) && (pos < 1000000000)) - DEBUGLOG(6, "Cpos %6u :%5u literals & match %3u bytes at distance %6u", - pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); -#endif - /* copy Literals */ - assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB); - ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); - seqStorePtr->lit += litLength; - - /* literal Length */ - if (litLength>0xFFFF) { - seqStorePtr->longLengthID = 1; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].litLength = (U16)litLength; - - /* match offset */ - seqStorePtr->sequences[0].offset = offsetCode + 1; - - /* match Length */ - if (matchCode>0xFFFF) { - seqStorePtr->longLengthID = 2; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); - } - seqStorePtr->sequences[0].matchLength = (U16)matchCode; - - seqStorePtr->sequences++; -} - - -/*-************************************* -* Match length counter -***************************************/ -static unsigned ZSTD_NbCommonBytes (register size_t val) -{ - if (MEM_isLittleEndian()) { - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanForward64( &r, (U64)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctzll((U64)val) >> 3); -# else - static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, - 0, 3, 1, 3, 1, 4, 2, 7, - 0, 2, 3, 6, 1, 5, 3, 5, - 1, 3, 4, 4, 2, 5, 6, 7, - 7, 0, 1, 2, 3, 3, 4, 6, - 2, 6, 5, 5, 3, 4, 5, 6, - 7, 1, 2, 4, 6, 4, 4, 5, - 7, 2, 6, 5, 7, 6, 7, 7 }; - return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r=0; - _BitScanForward( &r, (U32)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_ctz((U32)val) >> 3); -# else - static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, - 3, 2, 2, 1, 3, 2, 0, 1, - 3, 3, 1, 2, 2, 2, 2, 0, - 3, 1, 2, 0, 1, 0, 1, 1 }; - return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; -# endif - } - } else { /* Big Endian CPU */ - if (MEM_64bits()) { -# if defined(_MSC_VER) && defined(_WIN64) - unsigned long r = 0; - _BitScanReverse64( &r, val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >=
3) - return (__builtin_clzll(val) >> 3); -# else - unsigned r; - const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ - if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } - if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } - r += (!val); - return r; -# endif - } else { /* 32 bits */ -# if defined(_MSC_VER) - unsigned long r = 0; - _BitScanReverse( &r, (unsigned long)val ); - return (unsigned)(r>>3); -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (__builtin_clz((U32)val) >> 3); -# else - unsigned r; - if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } - r += (!val); - return r; -# endif - } } -} - - -static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) -{ - const BYTE* const pStart = pIn; - const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); - - while (pIn < pInLoopLimit) { - size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); - if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } - pIn += ZSTD_NbCommonBytes(diff); - return (size_t)(pIn - pStart); - } - if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } - if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } - if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++; - return (size_t)(pIn - pStart); -} - - -/** ZSTD_count_2segments() : -* can count match length with `ip` & `match` in 2 different segments. -* convention : on reaching mEnd, match count continue starting from iStart -*/ -static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart) -{ - const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd); - size_t const matchLength = ZSTD_count(ip, match, MIN(mEnd, vEnd)); - if (match + matchLength != mEnd) return matchLength; - return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); -} - - -/*-************************************* -* Hash Functions -***************************************/ -static const U32 prime3bytes = 506832829U; -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ - -static const U32 prime4bytes = 2654435761U; -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } -static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } - -static const U64 prime5bytes = 889523592379ULL; -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } -static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } - -static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } - -static const U64 prime7bytes = 58295818150454627ULL; -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } -static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } - -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } - -static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) -{ - switch(mls) - { - default: - case 4: return ZSTD_hash4Ptr(p, hBits); - case 5: return ZSTD_hash5Ptr(p, hBits); - case 6: return ZSTD_hash6Ptr(p, hBits); - case 7: return ZSTD_hash7Ptr(p, hBits); - case 8: return ZSTD_hash8Ptr(p, hBits); - } -} - - -/*-************************************* -* Fast Scan -***************************************/ -static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls) -{ - U32* const hashTable = zc->hashTable; - U32 const hBits = zc->appliedParams.cParams.hashLog; - const BYTE* const base = zc->base; - const BYTE* ip = base + zc->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const size_t fastHashFillStep = 3; - - while(ip <= iend) { - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); - ip +=
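/* seed only every 3rd position : favors table-fill speed; positions skipped here get inserted later, as the match loop updates the table */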
fastHashFillStep; - } -} - - -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, - const void* src, size_t srcSize, - const U32 mls) -{ - U32* const hashTable = cctx->hashTable; - U32 const hBits = cctx->appliedParams.cParams.hashLog; - seqStore_t* seqStorePtr = &(cctx->seqStore); - const BYTE* const base = cctx->base; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const U32 lowestIndex = cctx->dictLimit; - const BYTE* const lowest = base + lowestIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; - U32 offsetSaved = 0; - - /* init */ - ip += (ip==lowest); - { U32 const maxRep = (U32)(ip-lowest); - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; - } - - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ - size_t mLength; - size_t const h = ZSTD_hashPtr(ip, hBits, mls); - U32 const current = (U32)(ip-base); - U32 const matchIndex = hashTable[h]; - const BYTE* match = base + matchIndex; - hashTable[h] = current; /* update hash table */ - - if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); - } else { - U32 offset; - if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { - ip += ((ip-anchor) >> g_searchStrength) + 1; - continue; - } - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - offset = (U32)(ip-match); - while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - } - - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); - /* check immediate repcode */ - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base); - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); - ip += rLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } } } - - /* save reps for next block */ - seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; - seqStorePtr->repToConfirm[1] = offset_2 ? 
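/* repcodes are staged in repToConfirm and only promoted to rep[] once the block is known to be emitted compressed (see ZSTD_compressSequences) */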
offset_2 : offsetSaved; - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, - const void* src, size_t srcSize) -{ - const U32 mls = ctx->appliedParams.cParams.searchLength; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; - case 5 : - ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; - case 6 : - ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; - case 7 : - ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; - } -} - - -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, - const U32 mls) -{ - U32* hashTable = ctx->hashTable; - const U32 hBits = ctx->appliedParams.cParams.hashLog; - seqStore_t* seqStorePtr = &(ctx->seqStore); - const BYTE* const base = ctx->base; - const BYTE* const dictBase = ctx->dictBase; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const U32 lowestIndex = ctx->lowLimit; - const BYTE* const dictStart = dictBase + lowestIndex; - const U32 dictLimit = ctx->dictLimit; - const BYTE* const lowPrefixPtr = base + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t h = ZSTD_hashPtr(ip, hBits, mls); - const U32 matchIndex = hashTable[h]; - const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - const U32 current = (U32)(ip-base); - const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ - const BYTE* repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* repMatch = repBase + repIndex; - size_t mLength; - hashTable[h] = current; /* update hash table */ - - if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); - } else { - if ( (matchIndex < lowestIndex) || - (MEM_read32(match) != MEM_read32(ip)) ) { - ip += ((ip-anchor) >> g_searchStrength) + 1; - continue; - } - { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; - const BYTE* lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; - U32 offset; - mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - offset = current - matchIndex; - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - } } - - /* found a match : store it */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; - hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); - hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } - - /* save reps for next block */ - seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2; - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, - const void* src, size_t srcSize) -{ - U32 const mls = ctx->appliedParams.cParams.searchLength; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; - case 5 : - ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; - case 6 : - ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; - case 7 : - ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; - } -} - - -/*-************************************* -* Double Fast -***************************************/ -static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls) -{ - U32* const hashLarge = cctx->hashTable; - U32 const hBitsL = cctx->appliedParams.cParams.hashLog; - U32* const hashSmall = cctx->chainTable; - U32 const hBitsS = cctx->appliedParams.cParams.chainLog; - const BYTE* const base = cctx->base; - const BYTE* ip = base + cctx->nextToUpdate; - const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; - const size_t fastHashFillStep = 3; - - while(ip <= iend) { - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); - hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); - ip += fastHashFillStep; - } -} - - -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, - const void* src, size_t srcSize, - const U32 mls) -{ - U32* const hashLong = cctx->hashTable; - const U32 hBitsL = cctx->appliedParams.cParams.hashLog; - U32* const hashSmall = cctx->chainTable; - const U32 hBitsS = cctx->appliedParams.cParams.chainLog; - seqStore_t* seqStorePtr = &(cctx->seqStore); - const BYTE* const base = cctx->base; - const BYTE* const 
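/* double-fast : hashLong tracks 8-byte candidates (tried first), hashSmall tracks mls-byte candidates as fallback */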
istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const U32 lowestIndex = cctx->dictLimit; - const BYTE* const lowest = base + lowestIndex; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; - U32 offsetSaved = 0; - - /* init */ - ip += (ip==lowest); - { U32 const maxRep = (U32)(ip-lowest); - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; - } - - /* Main Search Loop */ - while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ - size_t mLength; - size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); - size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); - U32 const current = (U32)(ip-base); - U32 const matchIndexL = hashLong[h2]; - U32 const matchIndexS = hashSmall[h]; - const BYTE* matchLong = base + matchIndexL; - const BYTE* match = base + matchIndexS; - hashLong[h2] = hashSmall[h] = current; /* update hash tables */ - - assert(offset_1 <= current); /* supposed guaranteed by construction */ - if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { - /* favor repcode */ - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); - } else { - U32 offset; - if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { - mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; - offset = (U32)(ip-matchLong); - while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { - size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - U32 const matchIndexL3 = hashLong[hl3]; - const BYTE* matchL3 = base + matchIndexL3; - hashLong[hl3] = current + 1; - if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) { - mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; - ip++; - offset = (U32)(ip-matchL3); - while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ - } else { - mLength = ZSTD_count(ip+4, match+4, iend) + 4; - offset = (U32)(ip-match); - while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - } - } else { - ip += ((ip-anchor) >> g_searchStrength) + 1; - continue; - } - - offset_2 = offset_1; - offset_1 = offset; - - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - } - - /* match found */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = - hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = - hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); - - /* check immediate repcode */ - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); - ip 
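/* immediate repcode hit : the match is stored directly, without any table search */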
+= rLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } } } - - /* save reps for next block */ - seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; - seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize) -{ - const U32 mls = ctx->appliedParams.cParams.searchLength; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; - case 5 : - ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; - case 6 : - ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; - case 7 : - ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; - } -} - - -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, - const U32 mls) -{ - U32* const hashLong = ctx->hashTable; - U32 const hBitsL = ctx->appliedParams.cParams.hashLog; - U32* const hashSmall = ctx->chainTable; - U32 const hBitsS = ctx->appliedParams.cParams.chainLog; - seqStore_t* seqStorePtr = &(ctx->seqStore); - const BYTE* const base = ctx->base; - const BYTE* const dictBase = ctx->dictBase; - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const U32 lowestIndex = ctx->lowLimit; - const BYTE* const dictStart = dictBase + lowestIndex; - const U32 dictLimit = ctx->dictLimit; - const BYTE* const lowPrefixPtr = base + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; - - /* Search Loop */ - while (ip < ilimit) { /* < instead of <=, because (ip+1) */ - const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); - const U32 matchIndex = hashSmall[hSmall]; - const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; - const BYTE* match = matchBase + matchIndex; - - const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); - const U32 matchLongIndex = hashLong[hLong]; - const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base; - const BYTE* matchLong = matchLongBase + matchLongIndex; - - const U32 current = (U32)(ip-base); - const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ - const BYTE* repBase = repIndex < dictLimit ? dictBase : base; - const BYTE* repMatch = repBase + repIndex; - size_t mLength; - hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ - - if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) - && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { - const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; - mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; - ip++; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); - } else { - if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { - const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; - const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? 
dictStart : lowPrefixPtr; - U32 offset; - mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; - offset = current - matchLongIndex; - while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - - } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { - size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); - U32 const matchIndex3 = hashLong[h3]; - const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; - const BYTE* match3 = match3Base + matchIndex3; - U32 offset; - hashLong[h3] = current + 1; - if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { - const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; - const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; - mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; - ip++; - offset = current+1 - matchIndex3; - while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ - } else { - const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; - const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; - mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; - offset = current - matchIndex; - while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ - } - offset_2 = offset_1; - offset_1 = offset; - ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); - - } else { - ip += ((ip-anchor) >> g_searchStrength) + 1; - continue; - } } - - /* found a match : store it */ - ip += mLength; - anchor = ip; - - if (ip <= ilimit) { - /* Fill Table */ - hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; - hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; - hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); - hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); - /* check immediate repcode */ - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } } - - /* save reps for next block */ - seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2; - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, - const void* src, size_t srcSize) -{ - U32 const mls = ctx->appliedParams.cParams.searchLength; - switch(mls) - { - default: /* includes case 3 */ - case 4 : - ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; - case 5 : - ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; - case 6 : - ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; - case 7 : - ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; - } -} - - -/*-************************************* -* Binary Tree search -***************************************/ -/** ZSTD_insertBt1() : add one or multiple positions to tree. -* ip : assumed <= iend-8 . -* @return : nb of positions added */ -static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares, - U32 extDict) -{ - U32* const hashTable = zc->hashTable; - U32 const hashLog = zc->appliedParams.cParams.hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32* const bt = zc->chainTable; - U32 const btLog = zc->appliedParams.cParams.chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 matchIndex = hashTable[h]; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const base = zc->base; - const BYTE* const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* match; - const U32 current = (U32)(ip-base); - const U32 btLow = btMask >= current ? 
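/* btLow : oldest position still representable in the rolling tree buffer of (1<<btLog) nodes */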
0 : current - btMask; - U32* smallerPtr = bt + 2*(current&btMask); - U32* largerPtr = smallerPtr + 1; - U32 dummy32; /* to be nullified at the end */ - U32 const windowLow = zc->lowLimit; - U32 matchEndIdx = current+8; - size_t bestLength = 8; -#ifdef ZSTD_C_PREDICT - U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); - U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); - predictedSmall += (predictedSmall>0); - predictedLarge += (predictedLarge>0); -#endif /* ZSTD_C_PREDICT */ - - hashTable[h] = current; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - -#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ - const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ - if (matchIndex == predictedSmall) { - /* no need to check length, result known */ - *smallerPtr = matchIndex; - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - predictedSmall = predictPtr[1] + (predictPtr[1]>0); - continue; - } - if (matchIndex == predictedLarge) { - *largerPtr = matchIndex; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - predictedLarge = predictPtr[0] + (predictPtr[0]>0); - continue; - } -#endif - if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) - matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - bestLength = matchLength; - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - } - - if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ - break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ - - if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; - if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ - if (matchEndIdx > current + 8) return matchEndIdx - current - 8; - return 1; -} - - -static size_t 
ZSTD_insertBtAndFindBestMatch ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, - U32 nbCompares, const U32 mls, - U32 extDict) -{ - U32* const hashTable = zc->hashTable; - U32 const hashLog = zc->appliedParams.cParams.hashLog; - size_t const h = ZSTD_hashPtr(ip, hashLog, mls); - U32* const bt = zc->chainTable; - U32 const btLog = zc->appliedParams.cParams.chainLog - 1; - U32 const btMask = (1 << btLog) - 1; - U32 matchIndex = hashTable[h]; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const base = zc->base; - const BYTE* const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const U32 current = (U32)(ip-base); - const U32 btLow = btMask >= current ? 0 : current - btMask; - const U32 windowLow = zc->lowLimit; - U32* smallerPtr = bt + 2*(current&btMask); - U32* largerPtr = bt + 2*(current&btMask) + 1; - U32 matchEndIdx = current+8; - U32 dummy32; /* to be nullified at the end */ - size_t bestLength = 0; - - hashTable[h] = current; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32* const nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE* match; - - if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) - matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - if (matchLength > matchEndIdx - matchIndex) - matchEndIdx = matchIndex + (U32)matchLength; - if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; - if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - - if (match[matchLength] < ip[matchLength]) { - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; - - zc->nextToUpdate = (matchEndIdx > current + 8) ? 
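/* jump nextToUpdate past positions already covered by a long match, avoiding quadratic re-insertion */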
matchEndIdx - 8 : current+1; - return bestLength; -} - - -static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) -{ - const BYTE* const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while(idx < target) - idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0); -} - -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ -static size_t ZSTD_BtFindBestMatch ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 mls) -{ - if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); -} - - -static size_t ZSTD_BtFindBestMatch_selectMLS ( - ZSTD_CCtx* zc, /* Index table will be updated */ - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 matchLengthSearch) -{ - switch(matchLengthSearch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); - case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); - } -} - - -static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) -{ - const BYTE* const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1); -} - - -/** Tree updater, providing best match */ -static size_t ZSTD_BtFindBestMatch_extDict ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 mls) -{ - if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); -} - - -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( - ZSTD_CCtx* zc, /* Index table will be updated */ - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 matchLengthSearch) -{ - switch(matchLengthSearch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); - case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); - case 7 : - case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); - } -} - - - -/* ********************************* -* Hash Chain -***********************************/ -#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] - -/* Update chains up to ip (excluded) - Assumption : always within prefix (i.e. 
not within extDict) */ -FORCE_INLINE_TEMPLATE -U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls) -{ - U32* const hashTable = zc->hashTable; - const U32 hashLog = zc->appliedParams.cParams.hashLog; - U32* const chainTable = zc->chainTable; - const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1; - const BYTE* const base = zc->base; - const U32 target = (U32)(ip - base); - U32 idx = zc->nextToUpdate; - - while(idx < target) { /* catch up */ - size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); - NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; - hashTable[h] = idx; - idx++; - } - - zc->nextToUpdate = target; - return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; -} - - -/* inlining is important to hardwire a hot branch (template emulation) */ -FORCE_INLINE_TEMPLATE -size_t ZSTD_HcFindBestMatch_generic ( - ZSTD_CCtx* zc, /* Index table will be updated */ - const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 mls, const U32 extDict) -{ - U32* const chainTable = zc->chainTable; - const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog); - const U32 chainMask = chainSize-1; - const BYTE* const base = zc->base; - const BYTE* const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const U32 lowLimit = zc->lowLimit; - const U32 current = (U32)(ip-base); - const U32 minChain = current > chainSize ? current - chainSize : 0; - int nbAttempts=maxNbAttempts; - size_t ml=4-1; - - /* HC4 match finder */ - U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls); - - for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { - const BYTE* match; - size_t currentMl=0; - if ((!extDict) || matchIndex >= dictLimit) { - match = base + matchIndex; - if (match[ml] == ip[ml]) /* potentially better */ - currentMl = ZSTD_count(ip, match, iLimit); - } else { - match = dictBase + matchIndex; - if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; - } - - /* save best solution */ - if (currentMl > ml) { - ml = currentMl; - *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; - if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ - } - - if (matchIndex <= minChain) break; - matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); - } - - return ml; -} - - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( - ZSTD_CCtx* zc, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 matchLengthSearch) -{ - switch(matchLengthSearch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); - case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); - } -} - - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( - ZSTD_CCtx* zc, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr, - const U32 maxNbAttempts, const U32 matchLengthSearch) -{ - switch(matchLengthSearch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); - case 5 : return 
ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); - } -} - - -/* ******************************* -* Common parser - lazy strategy -*********************************/ -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, - const U32 searchMethod, const U32 depth) -{ - seqStore_t* seqStorePtr = &(ctx->seqStore); - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ctx->base + ctx->dictLimit; - - U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog; - U32 const mls = ctx->appliedParams.cParams.searchLength; - - typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, - size_t* offsetPtr, - U32 maxNbAttempts, U32 matchLengthSearch); - searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; - U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0; - - /* init */ - ip += (ip==base); - ctx->nextToUpdate3 = ctx->nextToUpdate; - { U32 const maxRep = (U32)(ip-base); - if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; - if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; - } - - /* Match Loop */ - while (ip < ilimit) { - size_t matchLength=0; - size_t offset=0; - const BYTE* start=ip+1; - - /* check repCode */ - if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) { - /* repcode : we take it */ - matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - if (depth==0) goto _storeSequence; - } - - /* first search (depth 0) */ - { size_t offsetFound = 99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); - if (ml2 > matchLength) - matchLength = ml2, start = ip, offset=offsetFound; - } - - if (matchLength < 4) { - ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ - continue; - } - - /* let's try to find a better solution */ - if (depth>=1) - while (ip<ilimit) { - ip ++; - if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { - size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; - int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); - if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; - } - { size_t offset2=99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; /* search a better one */ - } } - - /* let's find an even better one */ - if ((depth==2) && (ip<ilimit)) { - ip ++; - if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { - size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; - int const gain2 = (int)(ml2 * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); - if ((ml2 >= 4) && (gain2 > gain1)) - matchLength = ml2, offset = 0, start = ip; - } - { size_t offset2=99999999; - size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); - if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; - continue; - } } } - break; /* nothing found : store previous solution */ - } - - /* NOTE: - * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. - * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which - * overflows the pointer, which is undefined behavior. - */ - /* catch up */ - if (offset) { - while ( (start > anchor) - && (start > base+offset-ZSTD_REP_MOVE) - && (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1]) ) /* only search for offset within prefix */ - { start--; matchLength++; } - offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); - } - /* store sequence */ -_storeSequence: - { size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); - anchor = ip = start + matchLength; - } - - /* check immediate repcode */ - while ( (ip <= ilimit) - && ((offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ - ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); - ip += matchLength; - anchor = ip; - continue; /* faster when present ... (?) */ - } } - - /* Save reps for next block */ - seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; - seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); -} - -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); -} - -static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); -} - -static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) -{ - ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); -} - - -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, - const U32 searchMethod, const U32 depth) -{ - seqStore_t* seqStorePtr = &(ctx->seqStore); - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ctx->base; - const U32 dictLimit = ctx->dictLimit; - const U32 lowestIndex = ctx->lowLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictBase = ctx->dictBase; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const dictStart = dictBase + ctx->lowLimit; - - const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog; - const U32 mls = ctx->appliedParams.cParams.searchLength; - - typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, - size_t* offsetPtr, - U32 maxNbAttempts, U32 matchLengthSearch); - searchMax_f searchMax = searchMethod ? 
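/* template emulation : the binary-tree or hash-chain match finder is selected once here, outside the hot loop */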
-
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
-                                     const void* src, size_t srcSize,
-                                     const U32 searchMethod, const U32 depth)
-{
-    seqStore_t* seqStorePtr = &(ctx->seqStore);
-    const BYTE* const istart = (const BYTE*)src;
-    const BYTE* ip = istart;
-    const BYTE* anchor = istart;
-    const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ctx->base;
-    const U32 dictLimit = ctx->dictLimit;
-    const U32 lowestIndex = ctx->lowLimit;
-    const BYTE* const prefixStart = base + dictLimit;
-    const BYTE* const dictBase = ctx->dictBase;
-    const BYTE* const dictEnd = dictBase + dictLimit;
-    const BYTE* const dictStart = dictBase + ctx->lowLimit;
-
-    const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
-    const U32 mls = ctx->appliedParams.cParams.searchLength;
-
-    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
-                        size_t* offsetPtr,
-                        U32 maxNbAttempts, U32 matchLengthSearch);
-    searchMax_f searchMax = searchMethod ?
-                        ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
-
-    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
-
-    /* init */
-    ctx->nextToUpdate3 = ctx->nextToUpdate;
-    ip += (ip == prefixStart);
-
-    /* Match Loop */
-    while (ip < ilimit) {
-        size_t matchLength=0;
-        size_t offset=0;
-        const BYTE* start=ip+1;
-        U32 current = (U32)(ip-base);
-
-        /* check repCode */
-        {   const U32 repIndex = (U32)(current+1 - offset_1);
-            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-            const BYTE* const repMatch = repBase + repIndex;
-            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
-            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
-                /* repcode detected we should take it */
-                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                if (depth==0) goto _storeSequence;
-        }   }
-
-        /* first search (depth 0) */
-        {   size_t offsetFound = 99999999;
-            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
-            if (ml2 > matchLength)
-                matchLength = ml2, start = ip, offset=offsetFound;
-        }
-
-        if (matchLength < 4) {
-            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
-            continue;
-        }
-
-        /* let's try to find a better solution */
-        if (depth>=1)
-        while (ip<ilimit) {
-            ip ++;
-            current++;
-            /* check repCode */
-            if (offset) {
-                const U32 repIndex = (U32)(current - offset_1);
-                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-                const BYTE* const repMatch = repBase + repIndex;
-                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-                if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                    /* repcode detected */
-                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                    int const gain2 = (int)(repLength * 3);
-                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((repLength >= 4) && (gain2 > gain1))
-                        matchLength = repLength, offset = 0, start = ip;
-            }   }
-
-            /* search match, depth 1 */
-            {   size_t offset2=99999999;
-                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
-                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
-                if ((ml2 >= 4) && (gain2 > gain1)) {
-                    matchLength = ml2, offset = offset2, start = ip;
-                    continue;   /* search a better one */
-            }   }
-
-            /* let's find an even better one */
-            if ((depth==2) && (ip<ilimit)) {
-                ip ++;
-                current++;
-                /* check repCode */
-                if (offset) {
-                    const U32 repIndex = (U32)(current - offset_1);
-                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-                    const BYTE* const repMatch = repBase + repIndex;
-                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                        /* repcode detected */
-                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                        int const gain2 = (int)(repLength * 4);
-                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                        if ((repLength >= 4) && (gain2 > gain1))
-                            matchLength = repLength, offset = 0, start = ip;
-                }   }
-
-                /* search match, depth 2 */
-                {   size_t offset2=99999999;
-                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
-                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
-                    if ((ml2 >= 4) && (gain2 > gain1)) {
-                        matchLength = ml2, offset = offset2, start = ip;
-                        continue;
-            }   }   }
-            break;  /* nothing found : store previous solution */
-        }
-
-        /* catch up */
-        if (offset) {
-            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
-            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
-            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
-            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
-            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
-        }
-
-        /* store sequence */
-_storeSequence:
-        {   size_t const litLength = start - anchor;
-            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
-            anchor = ip = start + matchLength;
-        }
-
-        /* check immediate repcode */
-        while (ip <= ilimit) {
-            const U32 repIndex = (U32)((ip-base) - offset_2);
-            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
-            const BYTE* const repMatch = repBase + repIndex;
-            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
-            if (MEM_read32(ip) == MEM_read32(repMatch)) {
-                /* repcode detected we should take it */
-                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
-                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
-                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
-                ip += matchLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-            }
-            break;
-    }   }
-
-    /* Save reps for next block */
-    seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
-
-    /* Last Literals */
-    {   size_t const lastLLSize = iend - anchor;
-        memcpy(seqStorePtr->lit, anchor, lastLLSize);
-        seqStorePtr->lit += lastLLSize;
-    }
-}
-
-
-void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
-}
-
-static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
-}
-
-static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
-}
-
-static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-    ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
-}
-
-
-/* The optimal parser */
-#include "zstd_opt.h"
-
-static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
-    ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
-#else
-    (void)ctx; (void)src; (void)srcSize;
-    return;
-#endif
-}
-
-static void ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
-    ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
-#else
-    (void)ctx; (void)src; (void)srcSize;
-    return;
-#endif
-}
-
-static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
-    ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
-#else
-    (void)ctx; (void)src; (void)srcSize;
-    return;
-#endif
-}
-
-static void ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
-{
-#ifdef ZSTD_OPT_H_91842398743
-    ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
-#else
-    (void)ctx; (void)src; (void)srcSize;
-    return;
-#endif
-}
-
-
 /* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
  * assumption : strat is a valid strategy */
-typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
-static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
 {
     static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
         { ZSTD_compressBlock_fast  /* default for 0 */,
           ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
           ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
           ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
           ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
           ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
           ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
     };
     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
     assert((U32)strat >= (U32)ZSTD_fast);
     assert((U32)strat <= (U32)ZSTD_btultra);

     return blockCompressor[extDict!=0][(U32)strat];
 }

+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+                                   const BYTE* anchor, size_t lastLLSize)
+{
+    memcpy(seqStorePtr->lit, anchor, lastLLSize);
+    seqStorePtr->lit += lastLLSize;
+}

 static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, zc->lowLimit < zc->dictLimit);
     const BYTE* const base = zc->base;
     const BYTE* const istart = (const BYTE*)src;
     const U32 current = (U32)(istart-base);
+    size_t lastLLSize;
+    const BYTE* anchor;
+    U32 const extDict = zc->lowLimit < zc->dictLimit;
+    const ZSTD_blockCompressor blockCompressor =
+        zc->appliedParams.ldmParams.enableLdm
+            ? (extDict ? ZSTD_compressBlock_ldm_extDict : ZSTD_compressBlock_ldm)
+            : ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
+
     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0;   /* don't even attempt compression below a certain srcSize */
     ZSTD_resetSeqStore(&(zc->seqStore));
     if (current > zc->nextToUpdate + 384)
         zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* limited update after finding a very long match */
-    blockCompressor(zc, src, srcSize);
+
+    lastLLSize = blockCompressor(zc, src, srcSize);
+
+    /* Last literals */
+    anchor = (const BYTE*)src + srcSize - lastLLSize;
+    ZSTD_storeLastLiterals(&zc->seqStore, anchor, lastLLSize);
+
     return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
 }

 /*! ZSTD_compress_frameChunk() :
 *   Compress a chunk of data into one or multiple blocks.
 *   All blocks will be terminated, all input will be consumed.
 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
 *   Frame is supposed already started (header already produced)
 *   @return : compressed size, or an error code
 */
 static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                      U32 lastFrameChunk)
 {
     size_t blockSize = cctx->blockSize;
     size_t remaining = srcSize;
     const BYTE* ip = (const BYTE*)src;
     BYTE* const ostart = (BYTE*)dst;
     BYTE* op = ostart;
-    U32 const maxDist = 1 << cctx->appliedParams.cParams.windowLog;
+    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;

     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
         XXH64_update(&cctx->xxhState, src, srcSize);

     while (remaining) {
         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
         size_t cSize;

         if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
             return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
         if (remaining < blockSize) blockSize = remaining;

-        /* preemptive overflow correction */
+        /* preemptive overflow correction:
+         * 1. correction is large enough:
+         *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog - blockSize
+         *    1<<windowLog <= 1<<30 ==> current < 1<<32 - 1<<30 - blockSize
+         *
+         *    current - newCurrent
+         *    > (3<<29 + 1<<windowLog - blockSize) - (1<<windowLog + 1<<chainLog)
+         *    > (3<<29 - blockSize) - (1<<chainLog)
+         *    > (3<<29 - blockSize) - (1<<30)             (NOTE: chainLog <= 30)
+         *    > 1<<29 - 1<<17
+         *
+         * 2. (ip+blockSize - cctx->base) doesn't overflow:
+         *    In 32 bit mode we limit windowLog to 30 so we don't get
+         *    differences larger than 1<<31-1.
+         * 3. cctx->lowLimit < 1<<32:
+         *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+         */
         if (cctx->lowLimit > (3U<<29)) {
-            U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->appliedParams.cParams.hashLog, cctx->appliedParams.cParams.strategy)) - 1;
+            U32 const cycleMask = ((U32)1 << ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy)) - 1;
             U32 const current = (U32)(ip - cctx->base);
-            U32 const newCurrent = (current & cycleMask) + (1 << cctx->appliedParams.cParams.windowLog);
+            U32 const newCurrent = (current & cycleMask) + ((U32)1 << cctx->appliedParams.cParams.windowLog);
             U32 const correction = current - newCurrent;
-            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+            assert(current > newCurrent);
+            assert(correction > 1<<28); /* Loose bound, should be about 1<<29 */
             ZSTD_reduceIndex(cctx, correction);
             cctx->base += correction;
             cctx->dictBase += correction;
             cctx->lowLimit -= correction;
             cctx->dictLimit -= correction;
             if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
             else cctx->nextToUpdate -= correction;
+            DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x\n", correction, cctx->lowLimit);
         }

         if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
             /* enforce maxDist */
             U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
             if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
             if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
         }

         cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize);
         if (ZSTD_isError(cSize)) return cSize;

         if (cSize == 0) {  /* block is not compressible */
             U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
             if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
             MEM_writeLE32(op, cBlockHeader24);   /* no pb, 4th byte will be overwritten */
             memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
             cSize = ZSTD_blockHeaderSize+blockSize;
         } else {
             U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
             MEM_writeLE24(op, cBlockHeader24);
             cSize += ZSTD_blockHeaderSize;
         }

         remaining -= blockSize;
         dstCapacity -= cSize;
         ip += blockSize;
         op += cSize;
     }

     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
     return op-ostart;
 }
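An aside on the correction computed above: once indexes exceed 3<<29, every table index is rebased downward so that future indexes stay comfortably below 1<<32. A standalone sketch of the arithmetic, assuming cycleLog is derived from chainLog and strategy the way ZSTD_cycleLog does it (overflowCorrection is an illustrative name, not library code):

    #include <assert.h>
    #include <stdint.h>

    /* Keep the position inside the hash/chain table cycle, rebased just
     * above the window; the difference is subtracted from base, lowLimit,
     * dictLimit and nextToUpdate, and from every stored table index. */
    static uint32_t overflowCorrection(uint32_t current,
                                       unsigned cycleLog, unsigned windowLog)
    {
        uint32_t const cycleMask  = ((uint32_t)1 << cycleLog) - 1;
        uint32_t const newCurrent = (current & cycleMask) + ((uint32_t)1 << windowLog);
        uint32_t const correction = current - newCurrent;
        assert(current > newCurrent);   /* only called once current > 3<<29 */
        return correction;
    }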

 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
-                                    ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
+                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
 {   BYTE* const op = (BYTE*)dst;
     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
     U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
     U32   const checksumFlag = params.fParams.checksumFlag>0;
-    U32   const windowSize = 1U << params.cParams.windowLog;
+    U32   const windowSize = (U32)1 << params.cParams.windowLog;
     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
     U32   const fcsCode = params.fParams.contentSizeFlag ?
                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
     BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
-    size_t pos;
+    size_t pos=0;

     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
-    DEBUGLOG(5, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                 !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);

-    MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
-    op[4] = frameHeaderDecriptionByte; pos=5;
+    if (params.format == ZSTD_f_zstd1) {
+        DEBUGLOG(4, "writing zstd magic number");
+        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+        pos = 4;
+    }
+    op[pos++] = frameHeaderDecriptionByte;
     if (!singleSegment) op[pos++] = windowLogByte;
     switch(dictIDSizeCode)
     {
         default:  assert(0); /* impossible */
         case 0 : break;
         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
     }
     switch(fcsCode)
     {
         default:  assert(0); /* impossible */
         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
     }
     return pos;
 }

 static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                                U32 frame, U32 lastFrameChunk)
 {
     const BYTE* const ip = (const BYTE*) src;
     size_t fhSize = 0;

     DEBUGLOG(5, "ZSTD_compressContinue_internal");
     DEBUGLOG(5, "stage: %u", cctx->stage);
     if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */

     if (frame && (cctx->stage==ZSTDcs_init)) {
         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
-                                cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
         if (ZSTD_isError(fhSize)) return fhSize;
         dstCapacity -= fhSize;
         dst = (char*)dst + fhSize;
         cctx->stage = ZSTDcs_ongoing;
     }

     /* Check if blocks follow each other */
     if (src != cctx->nextSrc) {
         /* not contiguous */
         ptrdiff_t const delta = cctx->nextSrc - ip;
         cctx->lowLimit = cctx->dictLimit;
         cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
         cctx->dictBase = cctx->base;
         cctx->base -= delta;
         cctx->nextToUpdate = cctx->dictLimit;
         if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit;   /* too small extDict */
     }

     /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
     if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
         ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
         cctx->lowLimit = lowLimitMax;
     }

     cctx->nextSrc = ip + srcSize;

     if (srcSize) {
         size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); if (ZSTD_isError(cSize)) return cSize; cctx->consumedSrcSize += srcSize; return cSize + fhSize; } else return fhSize; } size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); } size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) { - U32 const cLevel = cctx->compressionLevel; - ZSTD_compressionParameters cParams = (cLevel == ZSTD_CLEVEL_CUSTOM) ? - cctx->appliedParams.cParams : - ZSTD_getCParams(cLevel, 0, 0); - return MIN (ZSTD_BLOCKSIZE_MAX, 1 << cParams.windowLog); + ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(cctx->appliedParams, 0, 0); + return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); } size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); } /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize) { const BYTE* const ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; /* input becomes current prefix */ zc->lowLimit = zc->dictLimit; zc->dictLimit = (U32)(zc->nextSrc - zc->base); zc->dictBase = zc->base; zc->base += ip - zc->nextSrc; zc->nextToUpdate = zc->dictLimit; - zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); + zc->loadedDictEnd = zc->appliedParams.forceWindow ? 0 : (U32)(iend - zc->base); zc->nextSrc = iend; if (srcSize <= HASH_READ_SIZE) return 0; switch(zc->appliedParams.cParams.strategy) { case ZSTD_fast: ZSTD_fillHashTable (zc, iend, zc->appliedParams.cParams.searchLength); break; - case ZSTD_dfast: ZSTD_fillDoubleHashTable (zc, iend, zc->appliedParams.cParams.searchLength); break; case ZSTD_greedy: case ZSTD_lazy: case ZSTD_lazy2: if (srcSize >= HASH_READ_SIZE) ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->appliedParams.cParams.searchLength); break; case ZSTD_btlazy2: case ZSTD_btopt: case ZSTD_btultra: if (srcSize >= HASH_READ_SIZE) - ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength); + ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, (U32)1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength); break; default: assert(0); /* not possible : not a valid strategy id */ } zc->nextToUpdate = (U32)(iend - zc->base); return 0; } /* Dictionaries that assign zero probability to symbols that show up causes problems when FSE encoding. Refuse dictionaries that assign zero probability to symbols that we may encounter during compression. NOTE: This behavior is not standard and could be improved in the future. 
  */
 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
     U32 s;
     if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
     for (s = 0; s <= maxSymbolValue; ++s) {
         if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
     }
     return 0;
 }

 /* Dictionary format :
  * See :
  * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
  */
 /*! ZSTD_loadZstdDictionary() :
  * @return : 0, or an error code
  *  assumptions : magic number supposed already checked
  *                dictSize supposed > 8
  */
 static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
     short offcodeNCount[MaxOff+1];
     unsigned offcodeMaxValue = MaxOff;

     ZSTD_STATIC_ASSERT(sizeof(cctx->entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));

     dictPtr += 4;   /* skip magic number */
     cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
     dictPtr += 4;

-    {   size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, 255, dictPtr, dictEnd-dictPtr);
+    {   unsigned maxSymbolValue = 255;
+        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
         if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
         dictPtr += hufHeaderSize;
     }

     {   unsigned offcodeLog;
         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += offcodeHeaderSize;
     }

     {   short matchlengthNCount[MaxML+1];
         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
         /* Every match length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += matchlengthHeaderSize;
     }

     {   short litlengthNCount[MaxLL+1];
         unsigned litlengthMaxValue = MaxLL, litlengthLog;
         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
         /* Every literal length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
                  dictionary_corrupted);
         dictPtr += litlengthHeaderSize;
     }

     if (dictPtr+12 > dictEnd) return
ERROR(dictionary_corrupted); cctx->seqStore.rep[0] = MEM_readLE32(dictPtr+0); cctx->seqStore.rep[1] = MEM_readLE32(dictPtr+4); cctx->seqStore.rep[2] = MEM_readLE32(dictPtr+8); dictPtr += 12; { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); U32 offcodeMax = MaxOff; if (dictContentSize <= ((U32)-1) - 128 KB) { U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ } /* All offset values <= dictContentSize + 128 KB must be representable */ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); /* All repCodes must be <= dictContentSize and != 0*/ { U32 u; for (u=0; u<3; u++) { if (cctx->seqStore.rep[u] == 0) return ERROR(dictionary_corrupted); if (cctx->seqStore.rep[u] > dictContentSize) return ERROR(dictionary_corrupted); } } cctx->entropy->hufCTable_repeatMode = HUF_repeat_valid; cctx->entropy->offcode_repeatMode = FSE_repeat_valid; cctx->entropy->matchlength_repeatMode = FSE_repeat_valid; cctx->entropy->litlength_repeatMode = FSE_repeat_valid; return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); } } /** ZSTD_compress_insertDictionary() : * @return : 0, or an error code */ static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode) { DEBUGLOG(5, "ZSTD_compress_insertDictionary"); if ((dict==NULL) || (dictSize<=8)) return 0; /* dict restricted modes */ if (dictMode==ZSTD_dm_rawContent) return ZSTD_loadDictionaryContent(cctx, dict, dictSize); if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { if (dictMode == ZSTD_dm_auto) { DEBUGLOG(5, "raw content dictionary detected"); return ZSTD_loadDictionaryContent(cctx, dict, dictSize); } if (dictMode == ZSTD_dm_fullDict) return ERROR(dictionary_wrong); assert(0); /* impossible */ } /* dict as full zstd dictionary */ return ZSTD_loadZstdDictionary(cctx, dict, dictSize); } /*! ZSTD_compressBegin_internal() : * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode, const ZSTD_CDict* cdict, - ZSTD_parameters params, U64 pledgedSrcSize, + ZSTD_CCtx_params params, U64 pledgedSrcSize, ZSTD_buffered_policy_e zbuff) { DEBUGLOG(4, "ZSTD_compressBegin_internal"); - DEBUGLOG(4, "dict ? %s", dict ? "dict" : (cdict ? "cdict" : "none")); - DEBUGLOG(4, "dictMode : %u", (U32)dictMode); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if (cdict && cdict->dictContentSize>0) { return ZSTD_copyCCtx_internal(cctx, cdict->refContext, params.fParams, pledgedSrcSize, zbuff); } CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, ZSTDcrp_continue, zbuff) ); return ZSTD_compress_insertDictionary(cctx, dict, dictSize, dictMode); } +size_t ZSTD_compressBegin_advanced_internal( + ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictMode_e dictMode, + ZSTD_CCtx_params params, + unsigned long long pledgedSrcSize) +{ + /* compression parameters verification and optimization */ + CHECK_F( ZSTD_checkCParams(params.cParams) ); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictMode, NULL, + params, pledgedSrcSize, + ZSTDb_not_buffered); +} /*! 
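An aside on the repcode validation just performed: a loaded dictionary is rejected unless each of its three stored repeat offsets is non-zero and lies within the dictionary content. A minimal sketch (repCodesValid is an illustrative name, not library API):

    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors the loop above: every rep offset must be usable as a distance
     * into the dictionary content, and zero is never a legal offset. */
    static int repCodesValid(const uint32_t rep[3], size_t dictContentSize)
    {
        unsigned u;
        for (u = 0; u < 3; u++) {
            if (rep[u] == 0) return 0;
            if (rep[u] > dictContentSize) return 0;
        }
        return 1;
    }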
ZSTD_compressBegin_advanced() : * @return : 0, or an error code */ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { - /* compression parameters verification and optimization */ - CHECK_F(ZSTD_checkCParams(params.cParams)); - return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL, - params, pledgedSrcSize, ZSTDb_not_buffered); + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); + return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dm_auto, + cctxParams, + pledgedSrcSize); } - size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL, - params, 0, ZSTDb_not_buffered); + cctxParams, 0, ZSTDb_not_buffered); } - size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); } /*! ZSTD_writeEpilogue() : * Ends a frame. * @return : nb of bytes written into dst (or an error code) */ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; size_t fhSize = 0; DEBUGLOG(5, "ZSTD_writeEpilogue"); if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */ /* special case : empty frame */ if (cctx->stage == ZSTDcs_init) { fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0); if (ZSTD_isError(fhSize)) return fhSize; dstCapacity -= fhSize; op += fhSize; cctx->stage = ZSTDcs_ongoing; } if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; if (dstCapacity<4) return ERROR(dstSize_tooSmall); MEM_writeLE32(op, cBlockHeader24); op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } if (cctx->appliedParams.fParams.checksumFlag) { U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); if (dstCapacity<4) return ERROR(dstSize_tooSmall); MEM_writeLE32(op, checksum); op += 4; } cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ return op-ostart; } size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 1 /* last chunk */); if (ZSTD_isError(cSize)) return cSize; endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); if (ZSTD_isError(endResult)) return endResult; if (cctx->appliedParams.fParams.contentSizeFlag) { /* control src size */ - DEBUGLOG(5, "end of frame : controlling src size"); + DEBUGLOG(4, "end of frame : controlling src size"); if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) { - DEBUGLOG(5, "error : pledgedSrcSize = %u, while realSrcSize = %u", + DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u", (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize); return ERROR(srcSize_wrong); } } return cSize + endResult; } static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* 
dict,size_t dictSize, ZSTD_parameters params) { - CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL, - params, srcSize, ZSTDb_not_buffered) ); - return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params); + return ZSTD_compress_advanced_internal(cctx, + dst, dstCapacity, + src, srcSize, + dict, dictSize, + cctxParams); } size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_parameters params) { CHECK_F(ZSTD_checkCParams(params.cParams)); return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); } +/* Internal */ +size_t ZSTD_compress_advanced_internal( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_CCtx_params params) +{ + CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL, + params, srcSize, ZSTDb_not_buffered) ); + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0); params.fParams.contentSizeFlag = 1; return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); } size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); } size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { size_t result; ZSTD_CCtx ctxBody; memset(&ctxBody, 0, sizeof(ctxBody)); ctxBody.customMem = ZSTD_defaultCMem; result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem); /* can't free ctxBody itself, as it's on stack; free only heap content */ return result; } /* ===== Dictionary API ===== */ /*! ZSTD_estimateCDictSize_advanced() : * Estimate amount of memory that will be needed to create a dictionary with following arguments */ -size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, unsigned byReference) +size_t ZSTD_estimateCDictSize_advanced( + size_t dictSize, ZSTD_compressionParameters cParams, + ZSTD_dictLoadMethod_e dictLoadMethod) { DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict)); - DEBUGLOG(5, "CCtx estimate : %u", (U32)ZSTD_estimateCCtxSize_advanced(cParams)); - return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_advanced(cParams) - + (byReference ? 0 : dictSize); + DEBUGLOG(5, "CCtx estimate : %u", + (U32)ZSTD_estimateCCtxSize_usingCParams(cParams)); + return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_usingCParams(cParams) + + (dictLoadMethod == ZSTD_dlm_byRef ? 
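An aside on the estimate above: a digested dictionary costs its header structure, one full compression context, and a copy of the dictionary bytes unless it is referenced in place. A sketch of the arithmetic, with headerSize standing in for sizeof(ZSTD_CDict), which is opaque outside the library:

    #include <stddef.h>

    /* byReference corresponds to ZSTD_dlm_byRef above: no dictionary copy. */
    static size_t estimateCDictSize(size_t headerSize, size_t cctxSize,
                                    size_t dictSize, int byReference)
    {
        return headerSize + cctxSize + (byReference ? 0 : dictSize);
    }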
0 : dictSize); } size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) { ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); - return ZSTD_estimateCDictSize_advanced(dictSize, cParams, 0); + return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); } size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support sizeof on NULL */ DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict)); DEBUGLOG(5, "ZSTD_sizeof_CCtx : %u", (U32)ZSTD_sizeof_CCtx(cdict->refContext)); return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict); } -static ZSTD_parameters ZSTD_makeParams(ZSTD_compressionParameters cParams, ZSTD_frameParameters fParams) -{ - ZSTD_parameters params; - params.cParams = cParams; - params.fParams = fParams; - return params; -} - static size_t ZSTD_initCDict_internal( ZSTD_CDict* cdict, const void* dictBuffer, size_t dictSize, - unsigned byReference, ZSTD_dictMode_e dictMode, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictMode_e dictMode, ZSTD_compressionParameters cParams) { DEBUGLOG(5, "ZSTD_initCDict_internal, mode %u", (U32)dictMode); - if ((byReference) || (!dictBuffer) || (!dictSize)) { + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { cdict->dictBuffer = NULL; cdict->dictContent = dictBuffer; } else { void* const internalBuffer = ZSTD_malloc(dictSize, cdict->refContext->customMem); cdict->dictBuffer = internalBuffer; cdict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); memcpy(internalBuffer, dictBuffer, dictSize); } cdict->dictContentSize = dictSize; - { ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, - 0 /* checksumFlag */, 0 /* noDictIDFlag */ }; /* dummy */ - ZSTD_parameters const params = ZSTD_makeParams(cParams, fParams); + { ZSTD_CCtx_params cctxParams = cdict->refContext->requestedParams; + cctxParams.cParams = cParams; CHECK_F( ZSTD_compressBegin_internal(cdict->refContext, cdict->dictContent, dictSize, dictMode, NULL, - params, ZSTD_CONTENTSIZE_UNKNOWN, + cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered) ); } return 0; } ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, - unsigned byReference, ZSTD_dictMode_e dictMode, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictMode_e dictMode, ZSTD_compressionParameters cParams, ZSTD_customMem customMem) { DEBUGLOG(5, "ZSTD_createCDict_advanced, mode %u", (U32)dictMode); if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem); ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); if (!cdict || !cctx) { ZSTD_free(cdict, customMem); ZSTD_freeCCtx(cctx); return NULL; } cdict->refContext = cctx; - if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dictBuffer, dictSize, - byReference, dictMode, + dictLoadMethod, dictMode, cParams) )) { ZSTD_freeCDict(cdict); return NULL; } return cdict; } } ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); return ZSTD_createCDict_advanced(dict, dictSize, - 0 /* byReference */, ZSTD_dm_auto, + ZSTD_dlm_byCopy, ZSTD_dm_auto, cParams, ZSTD_defaultCMem); } ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) { ZSTD_compressionParameters cParams = 
ZSTD_getCParams(compressionLevel, 0, dictSize); return ZSTD_createCDict_advanced(dict, dictSize, - 1 /* byReference */, ZSTD_dm_auto, + ZSTD_dlm_byRef, ZSTD_dm_auto, cParams, ZSTD_defaultCMem); } size_t ZSTD_freeCDict(ZSTD_CDict* cdict) { if (cdict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = cdict->refContext->customMem; ZSTD_freeCCtx(cdict->refContext); ZSTD_free(cdict->dictBuffer, cMem); ZSTD_free(cdict, cMem); return 0; } } /*! ZSTD_initStaticCDict_advanced() : * Generate a digested dictionary in provided memory area. * workspace: The memory area to emplace the dictionary into. * Provided pointer must 8-bytes aligned. * It must outlive dictionary usage. * workspaceSize: Use ZSTD_estimateCDictSize() * to determine how large workspace must be. * cParams : use ZSTD_getCParams() to transform a compression level * into its relevants cParams. * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) * Note : there is no corresponding "free" function. * Since workspace was allocated externally, it must be freed externally. */ ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, - unsigned byReference, ZSTD_dictMode_e dictMode, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictMode_e dictMode, ZSTD_compressionParameters cParams) { - size_t const cctxSize = ZSTD_estimateCCtxSize_advanced(cParams); - size_t const neededSize = sizeof(ZSTD_CDict) + (byReference ? 0 : dictSize) + size_t const cctxSize = ZSTD_estimateCCtxSize_usingCParams(cParams); + size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize) + cctxSize; ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace; void* ptr; DEBUGLOG(5, "(size_t)workspace & 7 : %u", (U32)(size_t)workspace & 7); if ((size_t)workspace & 7) return NULL; /* 8-aligned */ DEBUGLOG(5, "(workspaceSize < neededSize) : (%u < %u) => %u", (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize)); if (workspaceSize < neededSize) return NULL; - if (!byReference) { + if (dictLoadMethod == ZSTD_dlm_byCopy) { memcpy(cdict+1, dict, dictSize); dict = cdict+1; ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize; } else { ptr = cdict+1; } cdict->refContext = ZSTD_initStaticCCtx(ptr, cctxSize); if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, - 1 /* byReference */, dictMode, + ZSTD_dlm_byRef, dictMode, cParams) )) return NULL; return cdict; } -ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) { - return ZSTD_getParamsFromCCtx(cdict->refContext); +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) { + return cdict->refContext->appliedParams.cParams; } /* ZSTD_compressBegin_usingCDict_advanced() : * cdict must be != NULL */ size_t ZSTD_compressBegin_usingCDict_advanced( ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) { if (cdict==NULL) return ERROR(dictionary_wrong); - { ZSTD_parameters params = cdict->refContext->appliedParams; + { ZSTD_CCtx_params params = cctx->requestedParams; + params.cParams = ZSTD_getCParamsFromCDict(cdict); params.fParams = fParams; DEBUGLOG(5, "ZSTD_compressBegin_usingCDict_advanced"); return ZSTD_compressBegin_internal(cctx, NULL, 0, ZSTD_dm_auto, cdict, params, pledgedSrcSize, ZSTDb_not_buffered); } } /* ZSTD_compressBegin_usingCDict() : * pledgedSrcSize=0 means "unknown" * if pledgedSrcSize>0, it will enable contentSizeFlag */ size_t 
ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; DEBUGLOG(5, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag); return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0); } size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); } /*! ZSTD_compress_usingCDict() : * Compression using a digested Dictionary. * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. * Note that compression parameters are decided at CDict creation time * while frame parameters are hardcoded */ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); } /* ****************************************************************** * Streaming ********************************************************************/ ZSTD_CStream* ZSTD_createCStream(void) { return ZSTD_createCStream_advanced(ZSTD_defaultCMem); } ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticCCtx(workspace, workspaceSize); } ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) { /* CStream and CCtx are now same object */ return ZSTD_createCCtx_advanced(customMem); } size_t ZSTD_freeCStream(ZSTD_CStream* zcs) { return ZSTD_freeCCtx(zcs); /* same object */ } /*====== Initialization ======*/ size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode, const ZSTD_CDict* cdict, - ZSTD_parameters params, unsigned long long pledgedSrcSize) + const ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_resetCStream_internal"); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ CHECK_F( ZSTD_compressBegin_internal(zcs, dict, dictSize, dictMode, cdict, params, pledgedSrcSize, ZSTDb_buffered) ); zcs->inToCompress = 0; zcs->inBuffPos = 0; zcs->inBuffTarget = zcs->blockSize; zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_load; zcs->frameEnded = 0; return 0; /* ready to go */ } size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) { - ZSTD_parameters params = zcs->requestedParams; + ZSTD_CCtx_params params = zcs->requestedParams; params.fParams.contentSizeFlag = (pledgedSrcSize > 0); - DEBUGLOG(5, "ZSTD_resetCStream"); - if (zcs->compressionLevel != ZSTD_CLEVEL_CUSTOM) { - params.cParams = ZSTD_getCParams(zcs->compressionLevel, pledgedSrcSize, 0 /* dictSize */); - } - return ZSTD_resetCStream_internal(zcs, NULL, 0, zcs->dictMode, zcs->cdict, params, pledgedSrcSize); + params.cParams = 
ZSTD_getCParamsFromCCtxParams(params, pledgedSrcSize, 0); + DEBUGLOG(4, "ZSTD_resetCStream"); + return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize); } /*! ZSTD_initCStream_internal() : * Note : not static, but hidden (not exposed). Used by zstdmt_compress.c * Assumption 1 : params are valid * Assumption 2 : either dict, or cdict, is defined, not both */ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_parameters params, unsigned long long pledgedSrcSize) + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize) { - DEBUGLOG(5, "ZSTD_initCStream_internal"); + DEBUGLOG(4, "ZSTD_initCStream_internal"); assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ if (dict && dictSize >= 8) { DEBUGLOG(5, "loading dictionary of size %u", (U32)dictSize); if (zcs->staticSize) { /* static CCtx : never uses malloc */ /* incompatible with internal cdict creation */ return ERROR(memory_allocation); } ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, - zcs->dictContentByRef, zcs->dictMode, + ZSTD_dlm_byCopy, ZSTD_dm_auto, params.cParams, zcs->customMem); zcs->cdict = zcs->cdictLocal; if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); } else { if (cdict) { - ZSTD_parameters const cdictParams = ZSTD_getParamsFromCDict(cdict); - params.cParams = cdictParams.cParams; /* cParams are enforced from cdict */ + params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict */ } ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = NULL; zcs->cdict = cdict; } + params.compressionLevel = ZSTD_CLEVEL_CUSTOM; zcs->requestedParams = params; - zcs->compressionLevel = ZSTD_CLEVEL_CUSTOM; - return ZSTD_resetCStream_internal(zcs, NULL, 0, zcs->dictMode, zcs->cdict, params, pledgedSrcSize); + + return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize); } /* ZSTD_initCStream_usingCDict_advanced() : * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) { /* cannot handle NULL cdict (does not know what to do) */ if (!cdict) return ERROR(dictionary_wrong); - { ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict); + { ZSTD_CCtx_params params = zcs->requestedParams; + params.cParams = ZSTD_getCParamsFromCDict(cdict); params.fParams = fParams; return ZSTD_initCStream_internal(zcs, NULL, 0, cdict, params, pledgedSrcSize); } } /* note : cdict must outlive compression session */ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) { ZSTD_frameParameters const fParams = { 0 /* contentSize */, 0 /* checksum */, 0 /* hideDictID */ }; return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, 0); /* note : will check that cdict != NULL */ } size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params); CHECK_F( ZSTD_checkCParams(params.cParams) ); - zcs->requestedParams = params; - zcs->compressionLevel = ZSTD_CLEVEL_CUSTOM; - return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, params, pledgedSrcSize); + return ZSTD_initCStream_internal(zcs, 
dict, dictSize, NULL, cctxParams, pledgedSrcSize); } size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); - zcs->compressionLevel = compressionLevel; - return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, params, 0); + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params); + return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, 0); } size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize) { - ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0); - params.fParams.contentSizeFlag = (pledgedSrcSize>0); - return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, params, pledgedSrcSize); + ZSTD_CCtx_params cctxParams; + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0); + cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params); + cctxParams.fParams.contentSizeFlag = (pledgedSrcSize>0); + return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize); } size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) { - ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); - return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, params, 0); + return ZSTD_initCStream_srcSize(zcs, compressionLevel, 0); } /*====== Compression ======*/ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); if (length) memcpy(dst, src, length); return length; } /** ZSTD_compressStream_generic(): * internal function for all *compressStream*() variants and *compress_generic() + * non-static, because can be called from zstdmt.c * @return : hint size for next input */ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective const flushMode) { const char* const istart = (const char*)input->src; const char* const iend = istart + input->size; const char* ip = istart + input->pos; char* const ostart = (char*)output->dst; char* const oend = ostart + output->size; char* op = ostart + output->pos; U32 someMoreWork = 1; /* check expectations */ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode); assert(zcs->inBuff != NULL); assert(zcs->inBuffSize>0); assert(zcs->outBuff!= NULL); assert(zcs->outBuffSize>0); assert(output->pos <= output->size); assert(input->pos <= input->size); while (someMoreWork) { switch(zcs->streamStage) { case zcss_init: /* call ZSTD_initCStream() first ! 
*/ return ERROR(init_missing); case zcss_load: if ( (flushMode == ZSTD_e_end) && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ size_t const cSize = ZSTD_compressEnd(zcs, op, oend-op, ip, iend-ip); DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize); if (ZSTD_isError(cSize)) return cSize; ip = iend; op += cSize; zcs->frameEnded = 1; ZSTD_startNewCompression(zcs); someMoreWork = 0; break; } /* complete loading into inBuffer */ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; ip += loaded; if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ someMoreWork = 0; break; } if ( (flushMode == ZSTD_e_flush) && (zcs->inBuffPos == zcs->inToCompress) ) { /* empty */ someMoreWork = 0; break; } } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); { void* cDst; size_t cSize; size_t const iSize = zcs->inBuffPos - zcs->inToCompress; size_t oSize = oend-op; unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); if (oSize >= ZSTD_compressBound(iSize)) cDst = op; /* compress into output buffer, to skip flush stage */ else cDst = zcs->outBuff, oSize = zcs->outBuffSize; cSize = lastBlock ? ZSTD_compressEnd(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : ZSTD_compressContinue(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); if (ZSTD_isError(cSize)) return cSize; zcs->frameEnded = lastBlock; /* prepare next block */ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; if (zcs->inBuffTarget > zcs->inBuffSize) zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize); if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; if (cDst == op) { /* no need to flush */ op += cSize; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed directly in outBuffer"); someMoreWork = 0; ZSTD_startNewCompression(zcs); } break; } zcs->outBuffContentSize = cSize; zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } /* fall-through */ case zcss_flush: DEBUGLOG(5, "flush stage"); { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", (U32)toFlush, (U32)(oend-op), (U32)flushed); op += flushed; zcs->outBuffFlushedSize += flushed; if (toFlush!=flushed) { /* flush not fully completed, presumably because dst is too small */ assert(op==oend); someMoreWork = 0; break; } zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; if (zcs->frameEnded) { DEBUGLOG(5, "Frame completed on flush"); someMoreWork = 0; ZSTD_startNewCompression(zcs); break; } zcs->streamStage = zcss_load; break; } default: /* impossible */ assert(0); } } input->pos = ip - istart; output->pos = op - ostart; if (zcs->frameEnded) return 0; { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; if (hintInSize==0) hintInSize = zcs->blockSize; return hintInSize; } } size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { /* check conditions */ if 
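An aside on the state machine above, seen from the caller's side: the public streaming API drives exactly this load/flush cycle. A hedged usage sketch using only stable public entry points (error handling abbreviated):

    #include <stdio.h>
    #include <zstd.h>

    /* Feed input through ZSTD_compressStream(), then drain the epilogue with
     * ZSTD_endStream() until it reports 0 bytes remaining to flush. */
    static int compressFile(FILE* fin, FILE* fout, int level)
    {
        ZSTD_CStream* const zcs = ZSTD_createCStream();
        char inBuf[1 << 15], outBuf[1 << 15];
        size_t r = ZSTD_initCStream(zcs, level);
        if (ZSTD_isError(r)) return 1;
        for (;;) {
            size_t const readSize = fread(inBuf, 1, sizeof(inBuf), fin);
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
                r = ZSTD_compressStream(zcs, &output, &input);
                if (ZSTD_isError(r)) return 1;
                fwrite(outBuf, 1, output.pos, fout);
            }
            if (readSize < sizeof(inBuf)) break;   /* end of input */
        }
        for (;;) {   /* flush last block, checksum, epilogue */
            ZSTD_outBuffer output = { outBuf, sizeof(outBuf), 0 };
            size_t const remaining = ZSTD_endStream(zcs, &output);
            if (ZSTD_isError(remaining)) return 1;
            fwrite(outBuf, 1, output.pos, fout);
            if (remaining == 0) break;
        }
        ZSTD_freeCStream(zcs);
        return 0;
    }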
(output->pos > output->size) return ERROR(GENERIC); if (input->pos > input->size) return ERROR(GENERIC); return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue); } -/*! ZSTDMT_initCStream_internal() : - * Private use only. Init streaming operation. - * expects params to be valid. - * must receive dict, or cdict, or none, but not both. - * @return : 0, or an error code */ -size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, - const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_parameters params, unsigned long long pledgedSrcSize); - size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { + DEBUGLOG(5, "ZSTD_compress_generic"); /* check conditions */ if (output->pos > output->size) return ERROR(GENERIC); if (input->pos > input->size) return ERROR(GENERIC); assert(cctx!=NULL); /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { - const void* const prefix = cctx->prefix; - size_t const prefixSize = cctx->prefixSize; - ZSTD_parameters params = cctx->requestedParams; - if (cctx->compressionLevel != ZSTD_CLEVEL_CUSTOM) - params.cParams = ZSTD_getCParams(cctx->compressionLevel, - cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); - cctx->prefix = NULL; cctx->prefixSize = 0; /* single usage */ - assert(prefix==NULL || cctx->cdict==NULL); /* only one can be set */ + ZSTD_prefixDict const prefixDict = cctx->prefixDict; + ZSTD_CCtx_params params = cctx->requestedParams; + params.cParams = ZSTD_getCParamsFromCCtxParams( + cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); + memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ + assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ + DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage"); #ifdef ZSTD_MULTITHREAD - if (cctx->nbThreads > 1) { - DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", cctx->nbThreads); - CHECK_F( ZSTDMT_initCStream_internal(cctx->mtctx, prefix, prefixSize, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); + if (params.nbThreads > 1) { + if (cctx->mtctx == NULL || cctx->appliedParams.nbThreads != params.nbThreads) { + ZSTDMT_freeCCtx(cctx->mtctx); + cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbThreads, cctx->customMem); + if (cctx->mtctx == NULL) return ERROR(memory_allocation); + } + DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", params.nbThreads); + CHECK_F( ZSTDMT_initCStream_internal( + cctx->mtctx, + prefixDict.dict, prefixDict.dictSize, ZSTD_dm_rawContent, + cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); cctx->streamStage = zcss_load; + cctx->appliedParams.nbThreads = params.nbThreads; } else #endif { - CHECK_F( ZSTD_resetCStream_internal(cctx, prefix, prefixSize, cctx->dictMode, cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) ); + CHECK_F( ZSTD_resetCStream_internal( + cctx, prefixDict.dict, prefixDict.dictSize, + prefixDict.dictMode, cctx->cdict, params, + cctx->pledgedSrcSizePlusOne-1) ); } } /* compression stage */ #ifdef ZSTD_MULTITHREAD - if (cctx->nbThreads > 1) { + if (cctx->appliedParams.nbThreads > 1) { size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); - DEBUGLOG(5, "ZSTDMT_compressStream_generic : %u", (U32)flushMin); + DEBUGLOG(5, "ZSTDMT_compressStream_generic result : %u", (U32)flushMin); if ( ZSTD_isError(flushMin) || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ ZSTD_startNewCompression(cctx); } return 
flushMin; } #endif - CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) ); DEBUGLOG(5, "completed ZSTD_compress_generic"); return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_compress_generic_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; ZSTD_inBuffer input = { src, srcSize, *srcPos }; /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp); *dstPos = output.pos; *srcPos = input.pos; return cErr; } /*====== Finalize ======*/ /*! ZSTD_flushStream() : * @return : amount of data remaining to flush */ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; if (output->pos > output->size) return ERROR(GENERIC); CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) ); return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ } size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { ZSTD_inBuffer input = { NULL, 0, 0 }; if (output->pos > output->size) return ERROR(GENERIC); CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) ); { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4; size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize; DEBUGLOG(5, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); return toFlush; } } /*-===== Pre-defined compression levels =====-*/ #define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { { /* "default" - guarantees a monotonically increasing memory budget */ /* W, C, H, S, L, TL, strat */ { 18, 12, 12, 1, 7, 16, ZSTD_fast }, /* level 0 - never used */ { 19, 13, 14, 1, 7, 16, ZSTD_fast }, /* level 1 */ { 19, 15, 16, 1, 6, 16, ZSTD_fast }, /* level 2 */ { 20, 16, 17, 1, 5, 16, ZSTD_dfast }, /* level 3 */ { 20, 17, 18, 1, 5, 16, ZSTD_dfast }, /* level 4 */ { 20, 17, 18, 2, 5, 16, ZSTD_greedy }, /* level 5 */ { 21, 17, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */ { 21, 18, 19, 3, 5, 16, ZSTD_lazy }, /* level 7 */ { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ { 21, 19, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */ { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 13 */ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 14 */ { 22, 21, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 15 */ { 23, 22, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 16 */ { 23, 22, 22, 4, 5, 24, ZSTD_btopt }, /* level 17 */ { 23, 22, 22, 5, 4, 32, ZSTD_btopt }, /* level 18 */ { 23, 23, 22, 6, 3, 48, ZSTD_btopt }, /* level 19 */ { 25, 25, 23, 7, 3, 64, ZSTD_btultra }, /* level 20 */ { 26, 26, 24, 7, 3,256, ZSTD_btultra }, /* level 21 */ { 27, 27, 25, 9, 3,512, ZSTD_btultra }, /* level 22 */ }, { /* for srcSize <= 256 KB */ /* W, C, H, S, L, T, strat */ { 0, 0, 0, 0, 0, 0, ZSTD_fast }, /* level 0 - not used */ { 18, 13, 14, 1, 6, 8, ZSTD_fast }, /* level 1 */ { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */ { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* 
level 3 */ { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/ { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/ { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/ { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */ { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/ { 18, 18, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 12.*/ { 18, 19, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13 */ { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/ { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/ { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/ { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/ { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/ { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/ { 18, 19, 18, 11, 3,512, ZSTD_btultra }, /* level 20.*/ { 18, 19, 18, 12, 3,512, ZSTD_btultra }, /* level 21.*/ { 18, 19, 18, 13, 3,512, ZSTD_btultra }, /* level 22.*/ }, { /* for srcSize <= 128 KB */ /* W, C, H, S, L, T, strat */ { 17, 12, 12, 1, 7, 8, ZSTD_fast }, /* level 0 - not used */ { 17, 12, 13, 1, 6, 8, ZSTD_fast }, /* level 1 */ { 17, 13, 16, 1, 5, 8, ZSTD_fast }, /* level 2 */ { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */ { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */ { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */ { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */ { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */ { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/ { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/ { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/ { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/ { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/ { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/ { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/ { 17, 18, 17, 9, 3,256, ZSTD_btultra }, /* level 20.*/ { 17, 18, 17, 10, 3,256, ZSTD_btultra }, /* level 21.*/ { 17, 18, 17, 11, 3,512, ZSTD_btultra }, /* level 22.*/ }, { /* for srcSize <= 16 KB */ /* W, C, H, S, L, T, strat */ { 14, 12, 12, 1, 7, 6, ZSTD_fast }, /* level 0 - not used */ { 14, 14, 14, 1, 6, 6, ZSTD_fast }, /* level 1 */ { 14, 14, 14, 1, 4, 6, ZSTD_fast }, /* level 2 */ { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/ { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/ { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/ { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */ { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */ { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/ { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/ { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/ { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/ { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/ { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/ { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/ { 14, 15, 15, 8, 3,256, ZSTD_btultra }, /* level 20.*/ { 14, 15, 15, 9, 3,256, ZSTD_btultra }, /* level 21.*/ { 14, 15, 15, 10, 3,256, ZSTD_btultra }, /* 
level 22.*/ }, }; #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) /* This function just controls * the monotonic memory budget increase of ZSTD_defaultCParameters[0]. * Run once, on first ZSTD_getCParams() usage, if ZSTD_DEBUG is enabled */ MEM_STATIC void ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget(void) { int level; for (level=1; level<ZSTD_maxCLevel(); level++) { ZSTD_compressionParameters const c1 = ZSTD_defaultCParameters[0][level]; ZSTD_compressionParameters const c2 = ZSTD_defaultCParameters[0][level+1]; assert(c1.windowLog <= c2.windowLog); # define ZSTD_TABLECOST(h,c) ((1<<(h)) + (1<<(c))) assert(ZSTD_TABLECOST(c1.hashLog, c1.chainLog) <= ZSTD_TABLECOST(c2.hashLog, c2.chainLog)); } } #endif /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. * Size values are optional, provide 0 if not known or unused */ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { size_t const addedSize = srcSizeHint ? 0 : 500; U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1; U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) static int g_monotonicTest = 1; if (g_monotonicTest) { ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget(); g_monotonicTest=0; } #endif if (compressionLevel <= 0) compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default; no negative compressionLevel yet */ if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL; { ZSTD_compressionParameters const cp = ZSTD_defaultCParameters[tableID][compressionLevel]; return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); } + } /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). * All fields of `ZSTD_frameParameters` are set to default (0) */ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { ZSTD_parameters params; ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize); memset(&params, 0, sizeof(params)); params.cParams = cParams; return params; } Index: vendor/zstd/dist/lib/compress/zstd_compress.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_compress.h (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_compress.h (revision 325597) @@ -0,0 +1,307 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + +#ifndef ZSTD_COMPRESS_H +#define ZSTD_COMPRESS_H + +/*-************************************* +* Dependencies +***************************************/ +#include "zstd_internal.h" +#ifdef ZSTD_MULTITHREAD +# include "zstdmt_compress.h" +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-************************************* +* Constants +***************************************/ +static const U32 g_searchStrength = 8; +#define HASH_READ_SIZE 8 + + +/*-************************************* +* Context memory management +***************************************/ +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; + +typedef struct ZSTD_prefixDict_s { + const void* dict; + size_t dictSize; + ZSTD_dictMode_e dictMode; +} ZSTD_prefixDict; + +struct ZSTD_CCtx_s { + const BYTE* nextSrc; /* next block here to continue on current prefix */ + const BYTE* base; /* All regular indexes relative to this position */ + const BYTE* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more data */ + U32 nextToUpdate; /* index from which to continue dictionary update */ + U32 nextToUpdate3; /* index from which to continue dictionary update */ + U32 hashLog3; /* dispatch table : larger == faster, more memory */ + U32 loadedDictEnd; /* index of end of dictionary */ + ZSTD_compressionStage_e stage; + U32 dictID; + ZSTD_CCtx_params requestedParams; + ZSTD_CCtx_params appliedParams; + void* workSpace; + size_t workSpaceSize; + size_t blockSize; + U64 pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */ + U64 consumedSrcSize; + XXH64_state_t xxhState; + ZSTD_customMem customMem; + size_t staticSize; + + seqStore_t seqStore; /* sequences storage ptrs */ + optState_t optState; + ldmState_t ldmState; /* long distance matching state */ + U32* hashTable; + U32* hashTable3; + U32* chainTable; + ZSTD_entropyCTables_t* entropy; + + /* streaming */ + char* inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + char* outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage streamStage; + U32 frameEnded; + + /* Dictionary */ + ZSTD_CDict* cdictLocal; + const ZSTD_CDict* cdict; + ZSTD_prefixDict prefixDict; /* single-usage dictionary */ + + /* Multi-threading */ +#ifdef ZSTD_MULTITHREAD + ZSTDMT_CCtx* mtctx; +#endif +}; + + +static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24 }; + +static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; + +/*! ZSTD_storeSeq() : + Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. 
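+ Sequences are buffered in seqStore and entropy-coded together once the block is complete. 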
+ `offsetCode` : distance to match, or 0 == repCode. + `matchCode` : matchLength - MINMATCH +*/ +MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) +{ +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6) + static const BYTE* g_start = NULL; + U32 const pos = (U32)((const BYTE*)literals - g_start); + if (g_start==NULL) g_start = (const BYTE*)literals; + if ((pos > 0) && (pos < 1000000000)) + DEBUGLOG(6, "Cpos %6u :%5u literals & match %3u bytes at distance %6u", + pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); +#endif + /* copy Literals */ + assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB); + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); + seqStorePtr->lit += litLength; + + /* literal Length */ + if (litLength>0xFFFF) { + seqStorePtr->longLengthID = 1; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offset = offsetCode + 1; + + /* match Length */ + if (matchCode>0xFFFF) { + seqStorePtr->longLengthID = 2; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].matchLength = (U16)matchCode; + + seqStorePtr->sequences++; +} + + +/*-************************************* +* Match length counter +***************************************/ +static unsigned ZSTD_NbCommonBytes (register size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +MEM_STATIC 
size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) +{ + const BYTE* const pStart = pIn; + const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); + + while (pIn < pInLoopLimit) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } + pIn += ZSTD_NbCommonBytes(diff); + return (size_t)(pIn - pStart); + } + if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++; + return (size_t)(pIn - pStart); +} + +/** ZSTD_count_2segments() : +* can count match length with `ip` & `match` in 2 different segments. +* convention : on reaching mEnd, match count continue starting from iStart +*/ +MEM_STATIC size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart) +{ + const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd); + size_t const matchLength = ZSTD_count(ip, match, vEnd); + if (match + matchLength != mEnd) return matchLength; + return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); +} + + +/*-************************************* +* Hashes +***************************************/ +static const U32 prime3bytes = 506832829U; +static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ + +static const U32 prime4bytes = 2654435761U; +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } + +static const U64 prime5bytes = 889523592379ULL; +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } + +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime7bytes = 58295818150454627ULL; +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + +MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +{ + switch(mls) + { + default: + case 4: return ZSTD_hash4Ptr(p, hBits); + case 5: return ZSTD_hash5Ptr(p, hBits); + case 6: return ZSTD_hash6Ptr(p, hBits); + case 7: return ZSTD_hash7Ptr(p, hBits); + case 8: return ZSTD_hash8Ptr(p, hBits); + } +} + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_COMPRESS_H */ Property changes on: vendor/zstd/dist/lib/compress/zstd_compress.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_double_fast.c =================================================================== --- vendor/zstd/dist/lib/compress/zstd_double_fast.c (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_double_fast.c (revision 325597) @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_double_fast.h" + + +void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls) +{ + U32* const hashLarge = cctx->hashTable; + U32 const hBitsL = cctx->appliedParams.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + U32 const hBitsS = cctx->appliedParams.cParams.chainLog; + const BYTE* const base = cctx->base; + const BYTE* ip = base + cctx->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); + hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); + ip += fastHashFillStep; + } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = cctx->hashTable; + const U32 hBitsL = cctx->appliedParams.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + const U32 hBitsS = cctx->appliedParams.cParams.chainLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 const matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; + hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + + assert(offset_1 <= current); /* supposed guaranteed by construction */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + /* favor repcode */ + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { + size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndexL3 = hashLong[hl3]; + const BYTE* matchL3 = base + matchIndexL3; + hashLong[hl3] = current + 1; + if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + } else { + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; 
match--; mLength++; } /* catch up */ + } + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + const U32 mls = ctx->appliedParams.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); + } +} + + +static size_t ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = ctx->hashTable; + U32 const hBitsL = ctx->appliedParams.cParams.hashLog; + U32* const hashSmall = ctx->chainTable; + U32 const hBitsS = ctx->appliedParams.cParams.chainLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 matchIndex = hashSmall[hSmall]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); + const U32 matchLongIndex = hashLong[hLong]; + const BYTE* matchLongBase = matchLongIndex < dictLimit ? 
dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; + offset = current - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; + hashLong[h3] = current + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; + ip++; + offset = current+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + offset = current - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? 
dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->appliedParams.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); + } +} Property changes on: vendor/zstd/dist/lib/compress/zstd_double_fast.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_double_fast.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_double_fast.h (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_double_fast.h (revision 325597) @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_DOUBLE_FAST_H +#define ZSTD_DOUBLE_FAST_H + +#include "zstd_compress.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls); +size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_DOUBLE_FAST_H */ Property changes on: vendor/zstd/dist/lib/compress/zstd_double_fast.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_fast.c =================================================================== --- vendor/zstd/dist/lib/compress/zstd_fast.c (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_fast.c (revision 325597) @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_fast.h" + + +void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls) +{ + U32* const hashTable = zc->hashTable; + U32 const hBits = zc->appliedParams.cParams.hashLog; + const BYTE* const base = zc->base; + const BYTE* ip = base + zc->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); + ip += fastHashFillStep; + } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashTable = cctx->hashTable; + U32 const hBits = cctx->appliedParams.cParams.hashLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hBits, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; + hashTable[h] = current; /* update hash table */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndex <= lowestIndex) || 
(MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + const U32 mls = ctx->appliedParams.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); + } +} + + +static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* hashTable = ctx->hashTable; + const U32 hBits = ctx->appliedParams.cParams.hashLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t h = ZSTD_hashPtr(ip, hBits, mls); + const U32 matchIndex = hashTable[h]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? 
dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashTable[h] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ( (matchIndex < lowestIndex) || + (MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset = current - matchIndex; + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->appliedParams.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); + } +} Property changes on: vendor/zstd/dist/lib/compress/zstd_fast.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_fast.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_fast.h (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_fast.h (revision 325597) @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_FAST_H +#define ZSTD_FAST_H + +#include "zstd_compress.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +void ZSTD_fillHashTable(ZSTD_CCtx* zc, const void* end, const U32 mls); +size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, + const void* src, size_t srcSize); +size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FAST_H */ Property changes on: vendor/zstd/dist/lib/compress/zstd_fast.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_lazy.c =================================================================== --- vendor/zstd/dist/lib/compress/zstd_lazy.c (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_lazy.c (revision 325597) @@ -0,0 +1,749 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_lazy.h" + + +/*-************************************* +* Binary Tree search +***************************************/ +/** ZSTD_insertBt1() : add one or multiple positions to tree. +* ip : assumed <= iend-8 . +* @return : nb of positions added */ +static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->appliedParams.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->appliedParams.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 0 : current - btMask; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = zc->lowLimit; + U32 matchEndIdx = current+8; + size_t bestLength = 8; +#ifdef ZSTD_C_PREDICT + U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + predictedSmall += (predictedSmall>0); + predictedLarge += (predictedLarge>0); +#endif /* ZSTD_C_PREDICT */ + + assert(ip <= iend-8); /* required for h calculation */ + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + +#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ + const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ + if (matchIndex == predictedSmall) { + /* no need to check length, result known */ + *smallerPtr = matchIndex; + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + predictedSmall = predictPtr[1] + (predictPtr[1]>0); + continue; + } + if (matchIndex == predictedLarge) { + *largerPtr = matchIndex; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + predictedLarge = predictPtr[0] + (predictPtr[0]>0); + continue; + } +#endif + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + } + 
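+ /* each visited candidate is then re-linked into either the 'smaller' or the 'larger' branch, keeping the tree sorted around the current position */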
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ + + if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ + /* match+1 is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ + if (matchEndIdx > current + 8) return matchEndIdx - (current + 8); + return 1; +} + + +static size_t ZSTD_insertBtAndFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + U32 nbCompares, const U32 mls, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->appliedParams.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->appliedParams.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 
0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + + assert(ip <= iend-8); /* required for h calculation */ + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + zc->nextToUpdate = (matchEndIdx > current + 8) ? 
matchEndIdx - 8 : current+1; + return bestLength; +} + + +void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) + idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0); +} + +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + +void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1); +} + + +/** Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + + +/* ********************************* +* Hash Chain +***********************************/ +#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] + +/* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls) +{ + U32* const hashTable = zc->hashTable; + const U32 hashLog = zc->appliedParams.cParams.hashLog; + U32* const chainTable = zc->chainTable; + const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1; + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) { /* catch up */ + size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; + hashTable[h] = idx; + idx++; + } + + zc->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; +} + + +/* inlining is important to hardwire a hot branch (template emulation) */ +FORCE_INLINE_TEMPLATE +size_t ZSTD_HcFindBestMatch_generic ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls, const U32 extDict) +{ + U32* const chainTable = zc->chainTable; + const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog); + const U32 chainMask = chainSize-1; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 lowLimit = zc->lowLimit; + const U32 current = (U32)(ip-base); + const U32 minChain = current > chainSize ? current - chainSize : 0; + int nbAttempts=maxNbAttempts; + size_t ml=4-1; + + /* HC4 match finder */ + U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls); + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex >= dictLimit) { + match = base + matchIndex; + if (match[ml] == ip[ml]) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex; + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); + } + + return ml; +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); + case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); + } +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); + case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, 
offsetPtr, maxNbAttempts, 5, 1);
+    case 7 :
+    case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+    }
+}
+
+
+/* *******************************
+*  Common parser - lazy strategy
+*********************************/
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
+                                       const void* src, size_t srcSize,
+                                       const U32 searchMethod, const U32 depth)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base + ctx->dictLimit;
+
+    U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
+    U32 const mls = ctx->appliedParams.cParams.searchLength;
+
+    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+                        size_t* offsetPtr,
+                        U32 maxNbAttempts, U32 matchLengthSearch);
+    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
+
+    /* init */
+    ip += (ip==base);
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    {   U32 const maxRep = (U32)(ip-base);
+        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+    }
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+
+        /* check repCode */
+        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
+            /* repcode : we take it */
+            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            if (depth==0) goto _storeSequence;
+        }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < 4) {
+            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            if ( (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                int const gain2 = (int)(mlRep * 3);
+                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                if ((mlRep >= 4) && (gain2 > gain1))
+                    matchLength = mlRep, offset = 0, start = ip;
+            }
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= 4) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                ip ++;
+                if ( (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                    int const gain2 = (int)(ml2 * 4);
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((ml2 >= 4) && (gain2 > gain1))
+                        matchLength = ml2, offset = 0, start = ip;
+                }
+                {   size_t offset2=99999999;
+                    size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+                    if ((ml2 >= 4) && (gain2 > gain1)) {
+                        matchLength = ml2, offset = offset2, start = ip;
+                        continue;
+            }   }   }
+            break;  /* nothing found : store previous solution */
+        }
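+        /* (Note on the gain arithmetic above : each candidate is scored at
+         * roughly 4 points per matched byte minus ZSTD_highbit32(offset+1),
+         * a raw approximation of the cost of encoding its offset, so a
+         * slightly shorter match at a much closer offset can still win.) */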
+        /* NOTE:
+         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
+         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
+         * overflows the pointer, which is undefined behavior.
+         */
+        /* catch up */
+        if (offset) {
+            while ( (start > anchor)
+                 && (start > base+offset-ZSTD_REP_MOVE)
+                 && (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1]) )  /* only search for offset within prefix */
+                { start--; matchLength++; }
+            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+        }
+        /* store sequence */
+_storeSequence:
+        {   size_t const litLength = start - anchor;
+            ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            anchor = ip = start + matchLength;
+        }
+
+        /* check immediate repcode */
+        while ( (ip <= ilimit)
+             && ((offset_2>0)
+             & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+            /* store sequence */
+            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
+            ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+            ip += matchLength;
+            anchor = ip;
+            continue;   /* faster when present ... (?) */
+    }   }
+
+    /* Save reps for next block */
+    seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+    seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
+}
+
+size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
+}
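+/* (The four wrappers above instantiate the template as
+ *  (searchMethod, depth) = btlazy2:(1,2), lazy2:(0,2), lazy:(0,1), greedy:(0,0) ;
+ *  searchMethod selects binary-tree vs hash-chain search, and depth is how
+ *  many positions ahead the parser will look for a better match.) */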
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
+                                     const void* src, size_t srcSize,
+                                     const U32 searchMethod, const U32 depth)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base;
+    const U32 dictLimit = ctx->dictLimit;
+    const U32 lowestIndex = ctx->lowLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictBase = ctx->dictBase;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const dictStart = dictBase + ctx->lowLimit;
+
+    const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
+    const U32 mls = ctx->appliedParams.cParams.searchLength;
+
+    typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+                        size_t* offsetPtr,
+                        U32 maxNbAttempts, U32 matchLengthSearch);
+    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+    U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
+
+    /* init */
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    ip += (ip == prefixStart);
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+        U32 current = (U32)(ip-base);
+
+        /* check repCode */
+        {   const U32 repIndex = (U32)(current+1 - offset_1);
+            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+            const BYTE* const repMatch = repBase + repIndex;
+            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
+            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                /* repcode detected we should take it */
+                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                if (depth==0) goto _storeSequence;
+        }   }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < 4) {
+            ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            current++;
+            /* check repCode */
+            if (offset) {
+                const U32 repIndex = (U32)(current - offset_1);
+                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                const BYTE* const repMatch = repBase + repIndex;
+                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                    /* repcode detected */
+                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                    int const gain2 = (int)(repLength * 3);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((repLength >= 4) && (gain2 > gain1))
+                        matchLength = repLength, offset = 0, start = ip;
+            }   }
+
+            /* search match, depth 1 */
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= 4) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                current++;
+                ip ++;
+                /* check repCode */
+                if (offset) {
+                    const U32 repIndex = (U32)(current - offset_1);
+                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                    const BYTE* const repMatch = repBase + repIndex;
+                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                        /* repcode detected */
+                        const BYTE* const repEnd = repIndex < dictLimit ?
dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 2 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; + const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while (ip <= ilimit) { + const U32 repIndex = (U32)((ip-base) - offset_2); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } + break; + } } + + /* Save reps for next block */ + seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); +} + +size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); +} + +size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); +} + +size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); +} Property changes on: vendor/zstd/dist/lib/compress/zstd_lazy.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_lazy.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_lazy.h (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_lazy.h (revision 325597) @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_LAZY_H +#define ZSTD_LAZY_H + +#include "zstd_compress.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls); +void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls); +void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls); + +size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize); + +size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_LAZY_H */ Property changes on: vendor/zstd/dist/lib/compress/zstd_lazy.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_ldm.c =================================================================== --- vendor/zstd/dist/lib/compress/zstd_ldm.c (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_ldm.c (revision 325597) @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#include "zstd_ldm.h" + +#include "zstd_fast.h" /* ZSTD_fillHashTable() */ +#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ + +#define LDM_BUCKET_SIZE_LOG 3 +#define LDM_MIN_MATCH_LENGTH 64 +#define LDM_HASH_RLOG 7 +#define LDM_HASH_CHAR_OFFSET 10 + +size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm) +{ + ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); + params->enableLdm = enableLdm>0; + params->hashLog = 0; + params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; + params->minMatchLength = LDM_MIN_MATCH_LENGTH; + params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET; + return 0; +} + +void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog) +{ + if (params->hashLog == 0) { + params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG); + assert(params->hashLog <= ZSTD_HASHLOG_MAX); + } + if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) { + params->hashEveryLog = + windowLog < params->hashLog ? 
0 : windowLog - params->hashLog;
+    }
+    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
+
+size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
+    size_t const ldmHSize = ((size_t)1) << hashLog;
+    size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
+    size_t const ldmBucketSize =
+        ((size_t)1) << (hashLog - ldmBucketSizeLog);
+    return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
+}
+
+/** ZSTD_ldm_getSmallHash() :
+ *  numBits should be <= 32
+ *  If numBits==0, returns 0.
+ *  @return : the most significant numBits of value. */
+static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
+{
+    assert(numBits <= 32);
+    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
+}
+
+/** ZSTD_ldm_getChecksum() :
+ *  numBitsToDiscard should be <= 32
+ *  @return : the next most significant 32 bits after numBitsToDiscard */
+static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
+{
+    assert(numBitsToDiscard <= 32);
+    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
+}
+
+/** ZSTD_ldm_getTag() :
+ *  Given the hash, returns the most significant numTagBits bits
+ *  after (32 + hbits) bits.
+ *
+ *  If there are not enough bits remaining, return the last
+ *  numTagBits bits. */
+static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
+{
+    assert(numTagBits < 32 && hbits <= 32);
+    if (32 - hbits < numTagBits) {
+        return hash & (((U32)1 << numTagBits) - 1);
+    } else {
+        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
+    }
+}
+
+/** ZSTD_ldm_getBucket() :
+ *  Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/** ZSTD_ldm_insertEntry() :
+ *  Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+                                 size_t const hash, const ldmEntry_t entry,
+                                 ldmParams_t const ldmParams)
+{
+    BYTE* const bucketOffsets = ldmState->bucketOffsets;
+    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
+    bucketOffsets[hash]++;
+    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
+}
+
+/** ZSTD_ldm_makeEntryAndInsertByTag() :
+ *
+ *  Gets the small hash, checksum, and tag from the rollingHash.
+ *
+ *  If the tag matches (1 << ldmParams.hashEveryLog)-1, then
+ *  creates an ldmEntry from the offset, and inserts it into the hash table.
+ *
+ *  hBits is the length of the small hash, which is the most significant hBits
+ *  of rollingHash. The checksum is the next 32 most significant bits, followed
+ *  by ldmParams.hashEveryLog bits that make up the tag. */
+static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
+                                             U64 const rollingHash,
+                                             U32 const hBits,
+                                             U32 const offset,
+                                             ldmParams_t const ldmParams)
+{
+    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
+    U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+    if (tag == tagMask) {
+        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
+        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+        ldmEntry_t entry;
+        entry.offset = offset;
+        entry.checksum = checksum;
+        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
+    }
+}
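+
+/* (Illustration : the tag acts as a sampling filter ; with hashEveryLog == 7,
+ *  tagMask == 0x7F, so only rolling hashes whose tag bits are all ones,
+ *  i.e. roughly 1 position in 128 on average, enter the LDM hash table.) */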
+
+/** ZSTD_ldm_getRollingHash() :
+ *  Get a 64-bit hash using the first len bytes from buf.
+ *
+ *  Given bytes s = s_1, s_2, ... s_k, the hash is defined to be
+ *  H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
+ *
+ *  where the constant a is defined to be prime8bytes.
+ *
+ *  The implementation adds an offset to each byte, so
+ *  H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
+static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
+{
+    U64 ret = 0;
+    U32 i;
+    for (i = 0; i < len; i++) {
+        ret *= prime8bytes;
+        ret += buf[i] + LDM_HASH_CHAR_OFFSET;
+    }
+    return ret;
+}
+
+/** ZSTD_ldm_ipow() :
+ *  Return base^exp. */
+static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
+{
+    U64 ret = 1;
+    while (exp) {
+        if (exp & 1) { ret *= base; }
+        exp >>= 1;
+        base *= base;
+    }
+    return ret;
+}
+
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
+    assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
+    return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
+}
+
+/** ZSTD_ldm_updateHash() :
+ *  Updates hash by removing toRemove and adding toAdd. */
+static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
+{
+    hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
+    hash *= prime8bytes;
+    hash += toAdd + LDM_HASH_CHAR_OFFSET;
+    return hash;
+}
+
+/** ZSTD_ldm_countBackwardsMatch() :
+ *  Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+            const BYTE* pIn, const BYTE* pAnchor,
+            const BYTE* pMatch, const BYTE* pBase)
+{
+    size_t matchLength = 0;
+    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
+        pIn--;
+        pMatch--;
+        matchLength++;
+    }
+    return matchLength;
+}
+
+/** ZSTD_ldm_fillFastTables() :
+ *
+ *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ *  This is similar to ZSTD_loadDictionaryContent.
+ *
+ *  The tables for the other strategies are filled within their
+ *  block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
+{
+    const BYTE* const iend = (const BYTE*)end;
+    const U32 mls = zc->appliedParams.cParams.searchLength;
+
+    switch(zc->appliedParams.cParams.strategy)
+    {
+    case ZSTD_fast:
+        ZSTD_fillHashTable(zc, iend, mls);
+        zc->nextToUpdate = (U32)(iend - zc->base);
+        break;
+
+    case ZSTD_dfast:
+        ZSTD_fillDoubleHashTable(zc, iend, mls);
+        zc->nextToUpdate = (U32)(iend - zc->base);
+        break;
+
+    case ZSTD_greedy:
+    case ZSTD_lazy:
+    case ZSTD_lazy2:
+    case ZSTD_btlazy2:
+    case ZSTD_btopt:
+    case ZSTD_btultra:
+        break;
+    default:
+        assert(0);  /* not possible : not a valid strategy id */
+    }
+
+    return 0;
+}
+
+/** ZSTD_ldm_fillLdmHashTable() :
+ *
+ *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
+ *  lastHash is the rolling hash that corresponds to lastHashed.
+ *
+ *  Returns the rolling hash corresponding to position iend-1. */
+static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
+                                     U64 lastHash, const BYTE* lastHashed,
+                                     const BYTE* iend, const BYTE* base,
+                                     U32 hBits, ldmParams_t const ldmParams)
+{
+    U64 rollingHash = lastHash;
+    const BYTE* cur = lastHashed + 1;
+
+    while (cur < iend) {
+        rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
+                                          cur[ldmParams.minMatchLength-1],
+                                          state->hashPower);
+        ZSTD_ldm_makeEntryAndInsertByTag(state,
+                                         rollingHash, hBits,
+                                         (U32)(cur - base), ldmParams);
+        ++cur;
+    }
+    return rollingHash;
+}
+
+
+/** ZSTD_ldm_limitTableUpdate() :
+ *
+ *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
+ *  if it is far away
+ *  (after a long match, only update tables a limited amount).
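+ *
+ *  e.g. with nextToUpdate == 1000 and anchor at index 3000 :
+ *  current - nextToUpdate - 1024 = 976, so nextToUpdate is moved to
+ *  3000 - MIN(512, 976) = 2488, capping the catch-up work at 512 positions.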
*/ +static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor) +{ + U32 const current = (U32)(anchor - cctx->base); + if (current > cctx->nextToUpdate + 1024) { + cctx->nextToUpdate = + current - MIN(512, current - cctx->nextToUpdate - 1024); + } +} + +typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize); +/* defined in zstd_compress.c */ +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict); + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize) +{ + ldmState_t* const ldmState = &(cctx->ldmState); + const ldmParams_t ldmParams = cctx->appliedParams.ldmParams; + const U64 hashPower = ldmState->hashPower; + const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog; + const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog); + const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1; + seqStore_t* const seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE); + + const ZSTD_blockCompressor blockCompressor = + ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0); + U32* const repToConfirm = seqStorePtr->repToConfirm; + U32 savedRep[ZSTD_REP_NUM]; + U64 rollingHash = 0; + const BYTE* lastHashed = NULL; + size_t i, lastLiterals; + + /* Save seqStorePtr->rep and copy repToConfirm */ + for (i = 0; i < ZSTD_REP_NUM; i++) + savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i]; + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + U32 const current = (U32)(ip - base); + size_t forwardMatchLength = 0, backwardMatchLength = 0; + ldmEntry_t* bestEntry = NULL; + if (ip != istart) { + rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0], + lastHashed[ldmParams.minMatchLength], + hashPower); + } else { + rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength); + } + lastHashed = ip; + + /* Do not insert and do not look for a match */ + if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) != + ldmTagMask) { + ip++; + continue; + } + + /* Get the best entry and compute the match lengths */ + { + ldmEntry_t* const bucket = + ZSTD_ldm_getBucket(ldmState, + ZSTD_ldm_getSmallHash(rollingHash, hBits), + ldmParams); + ldmEntry_t* cur; + size_t bestMatchLength = 0; + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + + for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { + const BYTE* const pMatch = cur->offset + base; + size_t curForwardMatchLength, curBackwardMatchLength, + curTotalMatchLength; + if (cur->checksum != checksum || cur->offset <= lowestIndex) { + continue; + } + + curForwardMatchLength = ZSTD_count(ip, pMatch, iend); + if (curForwardMatchLength < ldmParams.minMatchLength) { + continue; + } + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch( + ip, anchor, pMatch, lowest); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + + if (curTotalMatchLength > bestMatchLength) { + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; + } + } + } + + /* No match 
found -- continue searching */ + if (bestEntry == NULL) { + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, + hBits, current, + ldmParams); + ip++; + continue; + } + + /* Match found */ + mLength = forwardMatchLength + backwardMatchLength; + ip -= backwardMatchLength; + + /* Call the block compressor on the remaining literals */ + { + U32 const matchIndex = bestEntry->offset; + const BYTE* const match = base + matchIndex - backwardMatchLength; + U32 const offset = (U32)(ip - match); + + /* Overwrite rep codes */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = repToConfirm[i]; + + /* Fill tables for block compressor */ + ZSTD_ldm_limitTableUpdate(cctx, anchor); + ZSTD_ldm_fillFastTables(cctx, anchor); + + /* Call block compressor and get remaining literals */ + lastLiterals = blockCompressor(cctx, anchor, ip - anchor); + cctx->nextToUpdate = (U32)(ip - base); + + /* Update repToConfirm with the new offset */ + for (i = ZSTD_REP_NUM - 1; i > 0; i--) + repToConfirm[i] = repToConfirm[i-1]; + repToConfirm[0] = offset; + + /* Store the sequence with the leftover literals */ + ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals, + offset + ZSTD_REP_MOVE, mLength - MINMATCH); + } + + /* Insert the current entry into the hash table */ + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, + (U32)(lastHashed - base), + ldmParams); + + assert(ip + backwardMatchLength == lastHashed); + + /* Fill the hash table from lastHashed+1 to ip+mLength*/ + /* Heuristic: don't need to fill the entire table at end of block */ + if (ip + mLength < ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + mLength, base, hBits, ldmParams); + lastHashed = ip + mLength - 1; + } + ip += mLength; + anchor = ip; + /* Check immediate repcode */ + while ( (ip < ilimit) + && ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest)) + && (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) { + + size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1], + iend) + 4; + /* Swap repToConfirm[1] <=> repToConfirm[0] */ + { + U32 const tmpOff = repToConfirm[1]; + repToConfirm[1] = repToConfirm[0]; + repToConfirm[0] = tmpOff; + } + + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + + /* Fill the hash table from lastHashed+1 to ip+rLength*/ + if (ip + rLength < ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + rLength, base, hBits, ldmParams); + lastHashed = ip + rLength - 1; + } + ip += rLength; + anchor = ip; + } + } + + /* Overwrite rep */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = repToConfirm[i]; + + ZSTD_ldm_limitTableUpdate(cctx, anchor); + ZSTD_ldm_fillFastTables(cctx, anchor); + + lastLiterals = blockCompressor(cctx, anchor, iend - anchor); + cctx->nextToUpdate = (U32)(iend - base); + + /* Restore seqStorePtr->rep */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = savedRep[i]; + + /* Return the last literals size */ + return lastLiterals; +} + +size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize); +} + +static size_t ZSTD_compressBlock_ldm_extDict_generic( + ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + ldmState_t* const ldmState = &(ctx->ldmState); + const ldmParams_t ldmParams = ctx->appliedParams.ldmParams; + const U64 hashPower = ldmState->hashPower; + const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog; + const U32 ldmBucketSize = 
((U32)1 << ldmParams.bucketSizeLog); + const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1; + seqStore_t* const seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE); + + const ZSTD_blockCompressor blockCompressor = + ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1); + U32* const repToConfirm = seqStorePtr->repToConfirm; + U32 savedRep[ZSTD_REP_NUM]; + U64 rollingHash = 0; + const BYTE* lastHashed = NULL; + size_t i, lastLiterals; + + /* Save seqStorePtr->rep and copy repToConfirm */ + for (i = 0; i < ZSTD_REP_NUM; i++) { + savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i]; + } + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + size_t mLength; + const U32 current = (U32)(ip-base); + size_t forwardMatchLength = 0, backwardMatchLength = 0; + ldmEntry_t* bestEntry = NULL; + if (ip != istart) { + rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0], + lastHashed[ldmParams.minMatchLength], + hashPower); + } else { + rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength); + } + lastHashed = ip; + + if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) != + ldmTagMask) { + /* Don't insert and don't look for a match */ + ip++; + continue; + } + + /* Get the best entry and compute the match lengths */ + { + ldmEntry_t* const bucket = + ZSTD_ldm_getBucket(ldmState, + ZSTD_ldm_getSmallHash(rollingHash, hBits), + ldmParams); + ldmEntry_t* cur; + size_t bestMatchLength = 0; + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + + for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { + const BYTE* const curMatchBase = + cur->offset < dictLimit ? dictBase : base; + const BYTE* const pMatch = curMatchBase + cur->offset; + const BYTE* const matchEnd = + cur->offset < dictLimit ? dictEnd : iend; + const BYTE* const lowMatchPtr = + cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; + size_t curForwardMatchLength, curBackwardMatchLength, + curTotalMatchLength; + + if (cur->checksum != checksum || cur->offset <= lowestIndex) { + continue; + } + + curForwardMatchLength = ZSTD_count_2segments( + ip, pMatch, iend, + matchEnd, lowPrefixPtr); + if (curForwardMatchLength < ldmParams.minMatchLength) { + continue; + } + curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch( + ip, anchor, pMatch, lowMatchPtr); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + + if (curTotalMatchLength > bestMatchLength) { + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; + } + } + } + + /* No match found -- continue searching */ + if (bestEntry == NULL) { + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, + (U32)(lastHashed - base), + ldmParams); + ip++; + continue; + } + + /* Match found */ + mLength = forwardMatchLength + backwardMatchLength; + ip -= backwardMatchLength; + + /* Call the block compressor on the remaining literals */ + { + /* ip = current - backwardMatchLength + * The match is at (bestEntry->offset - backwardMatchLength) */ + U32 const matchIndex = bestEntry->offset; + U32 const offset = current - matchIndex; + + /* Overwrite rep codes */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = repToConfirm[i]; + + /* Fill the hash table for the block compressor */ + ZSTD_ldm_limitTableUpdate(ctx, anchor); + ZSTD_ldm_fillFastTables(ctx, anchor); + + /* Call block compressor and get remaining literals */ + lastLiterals = blockCompressor(ctx, anchor, ip - anchor); + ctx->nextToUpdate = (U32)(ip - base); + + /* Update repToConfirm with the new offset */ + for (i = ZSTD_REP_NUM - 1; i > 0; i--) + repToConfirm[i] = repToConfirm[i-1]; + repToConfirm[0] = offset; + + /* Store the sequence with the leftover literals */ + ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals, + offset + ZSTD_REP_MOVE, mLength - MINMATCH); + } + + /* Insert the current entry into the hash table */ + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, + (U32)(lastHashed - base), + ldmParams); + + /* Fill the hash table from lastHashed+1 to ip+mLength */ + assert(ip + backwardMatchLength == lastHashed); + if (ip + mLength < ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + mLength, base, hBits, + ldmParams); + lastHashed = ip + mLength - 1; + } + ip += mLength; + anchor = ip; + + /* check immediate repcode */ + while (ip < ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - repToConfirm[1]; + const BYTE* repMatch2 = repIndex2 < dictLimit ? + dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & + (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
+ dictEnd : iend; + size_t const repLength2 = + ZSTD_count_2segments(ip+4, repMatch2+4, iend, + repEnd2, lowPrefixPtr) + 4; + + U32 tmpOffset = repToConfirm[1]; + repToConfirm[1] = repToConfirm[0]; + repToConfirm[0] = tmpOffset; + + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + + /* Fill the hash table from lastHashed+1 to ip+repLength2*/ + if (ip + repLength2 < ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + repLength2, base, hBits, + ldmParams); + lastHashed = ip + repLength2 - 1; + } + ip += repLength2; + anchor = ip; + continue; + } + break; + } + } + + /* Overwrite rep */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = repToConfirm[i]; + + ZSTD_ldm_limitTableUpdate(ctx, anchor); + ZSTD_ldm_fillFastTables(ctx, anchor); + + /* Call the block compressor one last time on the last literals */ + lastLiterals = blockCompressor(ctx, anchor, iend - anchor); + ctx->nextToUpdate = (U32)(iend - base); + + /* Restore seqStorePtr->rep */ + for (i = 0; i < ZSTD_REP_NUM; i++) + seqStorePtr->rep[i] = savedRep[i]; + + /* Return the last literals size */ + return lastLiterals; +} + +size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize); +} Property changes on: vendor/zstd/dist/lib/compress/zstd_ldm.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_ldm.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_ldm.h (nonexistent) +++ vendor/zstd/dist/lib/compress/zstd_ldm.h (revision 325597) @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#ifndef ZSTD_LDM_H +#define ZSTD_LDM_H + +#include "zstd_compress.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-************************************* +* Long distance matching +***************************************/ + +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX +#define ZSTD_LDM_HASHEVERYLOG_NOTSET 9999 + +/** ZSTD_compressBlock_ldm_generic() : + * + * This is a block compressor intended for long distance matching. + * + * The function searches for matches of length at least + * ldmParams.minMatchLength using a hash table in cctx->ldmState. + * Matches can be at a distance of up to cParams.windowLog. + * + * Upon finding a match, the unmatched literals are compressed using a + * ZSTD_blockCompressor (depending on the strategy in the compression + * parameters), which stores the matched sequences. The "long distance" + * match is then stored with the remaining literals from the + * ZSTD_blockCompressor. */ +size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* cctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize); + +/** ZSTD_ldm_initializeParameters() : + * Initialize the long distance matching parameters to their default values. 
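+ *  (With the defaults in zstd_ldm.c : bucketSizeLog = 3, minMatchLength = 64,
+ *  while hashLog and hashEveryLog are left unset, to be derived from windowLog
+ *  by ZSTD_ldm_adjustParameters.)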
*/
+size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm);
+
+/** ZSTD_ldm_getTableSize() :
+ *  Estimate the space needed for long distance matching tables. */
+size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog);
+
+/** ZSTD_ldm_getHashPower() :
+ *  Return prime8bytes^(minMatchLength-1) */
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
+
+/** ZSTD_ldm_adjustParameters() :
+ *  If the params->hashEveryLog is not set, set it to its default value based on
+ *  windowLog and params->hashLog.
+ *
+ *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ *  params->hashLog if it is not). */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LDM_H */

Property changes on: vendor/zstd/dist/lib/compress/zstd_ldm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/zstd/dist/lib/compress/zstd_opt.c
===================================================================
--- vendor/zstd/dist/lib/compress/zstd_opt.c	(nonexistent)
+++ vendor/zstd/dist/lib/compress/zstd_opt.c	(revision 325597)
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_opt.h"
+#include "zstd_lazy.h"
+
+
+#define ZSTD_LITFREQ_ADD    2
+#define ZSTD_FREQ_DIV       4
+#define ZSTD_MAX_PRICE      (1<<30)
+
+/*-*************************************
+*  Price functions for optimal parser
+***************************************/
+static void ZSTD_setLog2Prices(optState_t* optPtr)
+{
+    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
+    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
+    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
+    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+    optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum));
+}
+
+
+static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize)
+{
+    unsigned u;
+
+    optPtr->cachedLiterals = NULL;
+    optPtr->cachedPrice = optPtr->cachedLitLength = 0;
+    optPtr->staticPrices = 0;
+
+    if (optPtr->litLengthSum == 0) {
+        if (srcSize <= 1024) optPtr->staticPrices = 1;
+
+        assert(optPtr->litFreq!=NULL);
+        for (u=0; u<=MaxLit; u++)
+            optPtr->litFreq[u] = 0;
+        for (u=0; u<srcSize; u++)
+            optPtr->litFreq[src[u]]++;
+
+        optPtr->litSum = 0;
+        optPtr->litLengthSum = MaxLL+1;
+        optPtr->matchLengthSum = MaxML+1;
+        optPtr->offCodeSum = (MaxOff+1);
+        optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
+
+        for (u=0; u<=MaxLit; u++) {
+            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV);
+            optPtr->litSum += optPtr->litFreq[u];
+        }
+        for (u=0; u<=MaxLL; u++)
+            optPtr->litLengthFreq[u] = 1;
+        for (u=0; u<=MaxML; u++)
+            optPtr->matchLengthFreq[u] = 1;
+        for (u=0; u<=MaxOff; u++)
+            optPtr->offCodeFreq[u] = 1;
+    } else {
+        optPtr->matchLengthSum = 0;
+        optPtr->litLengthSum = 0;
+        optPtr->offCodeSum = 0;
+        optPtr->matchSum = 0;
+        optPtr->litSum = 0;
+
+        for (u=0; u<=MaxLit; u++) {
+            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
+            optPtr->litSum += optPtr->litFreq[u];
+        }
+        for (u=0; u<=MaxLL; u++) {
+            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
+            optPtr->litLengthSum += optPtr->litLengthFreq[u];
+        }
+        for (u=0; u<=MaxML; u++) {
+            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
+            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
+            optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3);
+        }
+        optPtr->matchSum *= ZSTD_LITFREQ_ADD;
+        for (u=0; u<=MaxOff; u++) {
+            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
+            optPtr->offCodeSum += optPtr->offCodeFreq[u];
+        }
+    }
+
+    ZSTD_setLog2Prices(optPtr);
+}
+
+
+static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals)
+{
+    U32 price, u;
+
+    if (optPtr->staticPrices)
+        return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
+
+    if (litLength == 0)
+        return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1);
+
+    /* literals */
+    if (optPtr->cachedLiterals == literals) {
+        U32 const additional = litLength - optPtr->cachedLitLength;
+        const BYTE* literals2 = optPtr->cachedLiterals + optPtr->cachedLitLength;
+        price = optPtr->cachedPrice + additional * optPtr->log2litSum;
+        for (u=0; u < additional; u++)
+            price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1);
+        optPtr->cachedPrice = price;
+        optPtr->cachedLitLength = litLength;
+    } else {
+        price = litLength * optPtr->log2litSum;
+        for (u=0; u < litLength; u++)
+            price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
+
+        if (litLength >= 12) {
+            optPtr->cachedLiterals = literals;
+            optPtr->cachedPrice = price;
+            optPtr->cachedLitLength = litLength;
+        }
+    }
+
+    /* literal Length */
+    {
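+        /* litLength is mapped to a literal-length code : lengths <= 63 use
+         * the LL_Code lookup table directly, longer lengths fall back to
+         * ZSTD_highbit32(litLength) + 19, matching the literal-length code
+         * definition of the zstd format. */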
const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1); + } + + return price; +} + + +FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra) +{ + /* offset */ + U32 price; + BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + + if (optPtr->staticPrices) + return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode; + + price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1); + if (!ultra && offCode >= 20) price += (offCode-19)*2; + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1); + } + + return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor; +} + + +static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength) +{ + U32 u; + + /* literals */ + optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; + for (u=0; u < litLength; u++) + optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + + /* literal Length */ + { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + optPtr->litLengthFreq[llCode]++; + optPtr->litLengthSum++; + } + + /* match offset */ + { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + optPtr->offCodeSum++; + optPtr->offCodeFreq[offCode]++; + } + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + optPtr->matchLengthFreq[mlCode]++; + optPtr->matchLengthSum++; + } + + ZSTD_setLog2Prices(optPtr); +} + + +#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ + { \ + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ + opt[pos].mlen = mlen_; \ + opt[pos].off = offset_; \ + opt[pos].litlen = litlen_; \ + opt[pos].price = price_; \ + } + + +/* function safe only for comparisons */ +static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) +{ + switch (length) + { + default : + case 4 : return MEM_read32(memPtr); + case 3 : if (MEM_isLittleEndian()) + return MEM_read32(memPtr)<<8; + else + return MEM_read32(memPtr)>>8; + } +} + + +/* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +static +U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip) +{ + U32* const hashTable3 = zc->hashTable3; + U32 const hashLog3 = zc->hashLog3; + const BYTE* const base = zc->base; + U32 idx = zc->nextToUpdate3; + const U32 target = zc->nextToUpdate3 = (U32)(ip - base); + const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); + + while(idx < target) { + hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; + idx++; + } + + return hashTable3[hash3]; +} + + +/*-************************************* +* Binary Tree search +***************************************/ +static U32 ZSTD_insertBtAndGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + U32 nbCompares, const U32 mls, + U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) +{ + const BYTE* const base = zc->base; + const U32 current = (U32)(ip-base); + const U32 hashLog = zc->appliedParams.cParams.hashLog; + const size_t h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const hashTable = zc->hashTable; + U32 matchIndex = hashTable[h]; + U32* const bt = zc->chainTable; + const U32 btLog = zc->appliedParams.cParams.chainLog - 1; + const U32 btMask= (1U << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 btLow = btMask >= current ? 0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + + const U32 minMatch = (mls == 3) ? 3 : 4; + size_t bestLength = minMatchLen-1; + + if (minMatch == 3) { /* HC3 match finder */ + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); + if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex3 >= dictLimit) { + match = base + matchIndex3; + if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex3; + if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; + } + + /* save best solution */ + if (currentMl > bestLength) { + bestLength = currentMl; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; + matches[mnum].len = (U32)currentMl; + mnum++; + if (currentMl > ZSTD_OPT_NUM) goto update; + if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ + } + } + } + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) { + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1; + } + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + 
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; + matches[mnum].len = (U32)matchLength; + mnum++; + if (matchLength > ZSTD_OPT_NUM) break; + if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + +update: + zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; + return mnum; +} + + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); + case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); + case 7 : + case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); + } +} + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : 
return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
+    case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
+    case 7 :
+    case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
+    }
+}
+
+
+/*-*******************************
+*  Optimal parser
+*********************************/
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
+                                      const void* src, size_t srcSize, const int ultra)
+{
+    seqStore_t* seqStorePtr = &(ctx->seqStore);
+    optState_t* optStatePtr = &(ctx->optState);
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ctx->base;
+    const BYTE* const prefixStart = base + ctx->dictLimit;
+
+    const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog;
+    const U32 sufficient_len = ctx->appliedParams.cParams.targetLength;
+    const U32 mls = ctx->appliedParams.cParams.searchLength;
+    const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4;
+
+    ZSTD_optimal_t* opt = optStatePtr->priceTable;
+    ZSTD_match_t* matches = optStatePtr->matchTable;
+    const BYTE* inr;
+    U32 offset, rep[ZSTD_REP_NUM];
+
+    /* init */
+    ctx->nextToUpdate3 = ctx->nextToUpdate;
+    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+    ip += (ip==prefixStart);
+    { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=seqStorePtr->rep[i]; }
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        U32 cur, match_num, last_pos, litlen, price;
+        U32 u, mlen, best_mlen, best_off, litLength;
+        memset(opt, 0, sizeof(ZSTD_optimal_t));
+        last_pos = 0;
+        litlen = (U32)(ip - anchor);
+
+        /* check repCode */
+        {   U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
+            for (i=(ip == anchor); i<last_i; i++) {
+                const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
+                if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
+                    && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
+                    mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
+                    if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
+                        best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
+                        goto _storeSequence;
+                    }
+                    best_off = i - (ip == anchor);
+                    do {
+                        price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
+                        if (mlen > last_pos || price < opt[mlen].price)
+                            SET_PRICE(mlen, mlen, i, litlen, price);   /* note : macro modifies last_pos */
+                        mlen--;
+                    } while (mlen >= minMatch);
+        }   }   }
+
+        match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
+
+        if (!last_pos && !match_num) { ip++; continue; }
+
+        if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
+            best_mlen = matches[match_num-1].len;
+            best_off = matches[match_num-1].off;
+            cur = 0;
+            last_pos = 1;
+            goto _storeSequence;
+        }
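+
+        /* opt[] is now filled as a small dynamic program over the next
+         * positions : opt[pos] records the cheapest known way (price) to
+         * reach ip+pos, either through literals or through one of the
+         * matches found above ; SET_PRICE only overwrites a slot when the
+         * new path is cheaper. */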
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ + mlen++; + } } + + if (last_pos < minMatch) { ip++; continue; } + + /* initialize opt[0] */ + { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } + opt[0].mlen = 1; + litlen = (U32)(ip - anchor); + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + inr = ip + cur; + + if (opt[cur-1].mlen == 1) { + litlen = opt[cur-1].litlen + 1; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + /* If opt[cur].off == ZSTD_REP_MOVE_OPT, then mlen != 1. + * offset ZSTD_REP_MOVE_OPT is used for the special case + * litLength == 0, where offset 0 means something special. + * mlen == 1 means the previous byte was stored as a literal, + * so they are mutually exclusive. + */ + assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1)); + opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i=(opt[cur].mlen != 1); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; + if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart)) + && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) { + mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
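/* The rep[] bookkeeping above implements the three-slot repeat-offset history: off==0 reuses rep[0], off 1..2 promote an older slot to the front, a value above ZSTD_REP_MOVE_OPT introduces a brand-new distance, and ZSTD_REP_MOVE_OPT itself marks the litLength==0 rep[0]-1 case. A self-contained sketch of that rotation; pushOffset is a hypothetical helper, not the parser's inline code, and the litLength==0 special case is left out: */
#include <stdio.h>
enum { REP_NUM = 3 };
static void pushOffset(unsigned rep[REP_NUM], unsigned repCode, unsigned newDist)
{
    if (repCode == 0) return;            /* rep[0] reused, history unchanged */
    if (repCode < REP_NUM) {             /* promote rep[repCode] to the front */
        unsigned const dist = rep[repCode];
        if (repCode == 2) rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = dist;
    } else {                             /* brand-new distance pushes the rest down */
        rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = newDist;
    }
}
int main(void)
{
    unsigned rep[REP_NUM] = { 1, 4, 8 };
    pushOffset(rep, 2, 0);    /* promote rep[2] : history becomes 8,1,4  */
    pushOffset(rep, 3, 20);   /* new distance   : history becomes 20,8,1 */
    printf("%u %u %u\n", rep[0], rep[1], rep[2]);
    return 0;
}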
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos;) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + if (litLength==0) offset--; + } + + ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); +} + +size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, const int ultra) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + optState_t* optStatePtr = &(ctx->optState); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const U32 lowestIndex = ctx->lowLimit; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + + const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog; + const U32 sufficient_len = ctx->appliedParams.cParams.targetLength; + const U32 mls = ctx->appliedParams.cParams.searchLength; + const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 
3 : 4; + + ZSTD_optimal_t* opt = optStatePtr->priceTable; + ZSTD_match_t* matches = optStatePtr->matchTable; + const BYTE* inr; + + /* init */ + U32 offset, rep[ZSTD_REP_NUM]; + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; } + + ctx->nextToUpdate3 = ctx->nextToUpdate; + ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; + U32 current = (U32)(ip-base); + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + opt[0].litlen = (U32)(ip - anchor); + + /* check repCode */ + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i = (ip==anchor); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; + const U32 repIndex = (U32)(current - repCur); + const BYTE* const repMatch = (repIndex < dictLimit) ? dictBase + repIndex : base + repIndex; + if ( (repCur > 0 && repCur <= (S32)current) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; + goto _storeSequence; + } + + best_off = i - (ip==anchor); + litlen = opt[0].litlen; + do { + price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ + + if (!last_pos && !match_num) { ip++; continue; } + + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } + opt[0].mlen = 1; + + if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + cur = 0; + last_pos = 1; + goto _storeSequence; + } + + best_mlen = (last_pos) ? last_pos : minMatch; + + /* set prices using matches at position = 0 */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + litlen = opt[0].litlen; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); + mlen++; + } } + + if (last_pos < minMatch) { + ip++; continue; + } + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + inr = ip + cur; + + if (opt[cur-1].mlen == 1) { + litlen = opt[cur-1].litlen + 1; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? 
opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + assert(!(opt[cur].off == ZSTD_REP_MOVE_OPT && mlen == 1)); + opt[cur].rep[0] = (opt[cur].off == ZSTD_REP_MOVE_OPT) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i = (mlen != 1); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; + const U32 repIndex = (U32)(current+cur - repCur); + const BYTE* const repMatch = (repIndex < dictLimit) ? dictBase + repIndex : base + repIndex; + if ( (repCur > 0 && repCur <= (S32)(current+cur)) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } /* for (cur = 1; cur <= last_pos; cur++) */ + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos; ) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + + if (litLength==0) offset--; + } + + ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); +} + +size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); +} Property changes on: vendor/zstd/dist/lib/compress/zstd_opt.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: vendor/zstd/dist/lib/compress/zstd_opt.h =================================================================== --- vendor/zstd/dist/lib/compress/zstd_opt.h (revision 325596) +++ vendor/zstd/dist/lib/compress/zstd_opt.h (revision 325597) @@ -1,938 +1,30 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ +#ifndef ZSTD_OPT_H +#define ZSTD_OPT_H -/* Note : this file is intended to be included within zstd_compress.c */ +#include "zstd_compress.h" +#if defined (__cplusplus) +extern "C" { +#endif -#ifndef ZSTD_OPT_H_91842398743 -#define ZSTD_OPT_H_91842398743 +size_t ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize); -#define ZSTD_LITFREQ_ADD 2 -#define ZSTD_FREQ_DIV 4 -#define ZSTD_MAX_PRICE (1<<30) - -/*-************************************* -* Price functions for optimal parser -***************************************/ -static void ZSTD_setLog2Prices(optState_t* optPtr) -{ - optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1); - optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1); - optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1); - optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1); - optPtr->factor = 1 + ((optPtr->litSum>>5) / optPtr->litLengthSum) + ((optPtr->litSum<<1) / (optPtr->litSum + optPtr->matchSum)); +#if defined (__cplusplus) } +#endif - -static void ZSTD_rescaleFreqs(optState_t* optPtr, const BYTE* src, size_t srcSize) -{ - unsigned u; - - optPtr->cachedLiterals = NULL; - optPtr->cachedPrice = optPtr->cachedLitLength = 0; - optPtr->staticPrices = 0; - - if (optPtr->litLengthSum == 0) { - if (srcSize <= 1024) optPtr->staticPrices = 1; - - assert(optPtr->litFreq!=NULL); - for (u=0; u<=MaxLit; u++) - optPtr->litFreq[u] = 0; - for (u=0; u<srcSize; u++) - optPtr->litFreq[src[u]]++; - - optPtr->litSum = 0; - optPtr->litLengthSum = MaxLL+1; - optPtr->matchLengthSum = MaxML+1; - optPtr->offCodeSum = (MaxOff+1); - optPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits); - - for (u=0; u<=MaxLit; u++) { - optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>ZSTD_FREQ_DIV); - optPtr->litSum += optPtr->litFreq[u]; - } - for (u=0; u<=MaxLL; u++) - optPtr->litLengthFreq[u] = 1; - for (u=0; u<=MaxML; u++) - optPtr->matchLengthFreq[u] = 1; - for (u=0; u<=MaxOff; u++) - optPtr->offCodeFreq[u] = 1; - } else { - optPtr->matchLengthSum = 0; - optPtr->litLengthSum = 0; - optPtr->offCodeSum = 0; - optPtr->matchSum = 0; - optPtr->litSum = 0; - - for (u=0; u<=MaxLit; u++) { - optPtr->litFreq[u] = 1 + (optPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1)); - optPtr->litSum += optPtr->litFreq[u]; - } - for (u=0; u<=MaxLL; u++) { - optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1)); - optPtr->litLengthSum += optPtr->litLengthFreq[u]; - } - for (u=0; u<=MaxML; u++) { - optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV); - optPtr->matchLengthSum += optPtr->matchLengthFreq[u]; - optPtr->matchSum += optPtr->matchLengthFreq[u] * (u + 3); - } - optPtr->matchSum *= ZSTD_LITFREQ_ADD; - for (u=0; u<=MaxOff; u++) { - optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV); - optPtr->offCodeSum += optPtr->offCodeFreq[u]; - } - } - - ZSTD_setLog2Prices(optPtr); -} - - -static U32 ZSTD_getLiteralPrice(optState_t* optPtr, U32 litLength, const BYTE* literals) -{ - U32 price, u; - - if (optPtr->staticPrices) - return ZSTD_highbit32((U32)litLength+1) + (litLength*6); - - if (litLength == 0) - return optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[0]+1); - - /* literals */ - if (optPtr->cachedLiterals == literals) { - U32 const additional = litLength - optPtr->cachedLitLength; - const BYTE* literals2 = 
optPtr->cachedLiterals + optPtr->cachedLitLength; - price = optPtr->cachedPrice + additional * optPtr->log2litSum; - for (u=0; u < additional; u++) - price -= ZSTD_highbit32(optPtr->litFreq[literals2[u]]+1); - optPtr->cachedPrice = price; - optPtr->cachedLitLength = litLength; - } else { - price = litLength * optPtr->log2litSum; - for (u=0; u < litLength; u++) - price -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1); - - if (litLength >= 12) { - optPtr->cachedLiterals = literals; - optPtr->cachedPrice = price; - optPtr->cachedLitLength = litLength; - } - } - - /* literal Length */ - { const BYTE LL_deltaCode = 19; - const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; - price += LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1); - } - - return price; -} - - -FORCE_INLINE_TEMPLATE U32 ZSTD_getPrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra) -{ - /* offset */ - U32 price; - BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); - - if (optPtr->staticPrices) - return ZSTD_getLiteralPrice(optPtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode; - - price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1); - if (!ultra && offCode >= 20) price += (offCode-19)*2; - - /* match Length */ - { const BYTE ML_deltaCode = 36; - const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; - price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1); - } - - return price + ZSTD_getLiteralPrice(optPtr, litLength, literals) + optPtr->factor; -} - - -static void ZSTD_updatePrice(optState_t* optPtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength) -{ - U32 u; - - /* literals */ - optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; - for (u=0; u < litLength; u++) - optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; - - /* literal Length */ - { const BYTE LL_deltaCode = 19; - const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; - optPtr->litLengthFreq[llCode]++; - optPtr->litLengthSum++; - } - - /* match offset */ - { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); - optPtr->offCodeSum++; - optPtr->offCodeFreq[offCode]++; - } - - /* match Length */ - { const BYTE ML_deltaCode = 36; - const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; - optPtr->matchLengthFreq[mlCode]++; - optPtr->matchLengthSum++; - } - - ZSTD_setLog2Prices(optPtr); -} - - -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ - { \ - while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ - opt[pos].mlen = mlen_; \ - opt[pos].off = offset_; \ - opt[pos].litlen = litlen_; \ - opt[pos].price = price_; \ - } - - -/* function safe only for comparisons */ -static U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) -{ - switch (length) - { - default : - case 4 : return MEM_read32(memPtr); - case 3 : if (MEM_isLittleEndian()) - return MEM_read32(memPtr)<<8; - else - return MEM_read32(memPtr)>>8; - } -} - - -/* Update hashTable3 up to ip (excluded) - Assumption : always within prefix (i.e. 
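/* ZSTD_readMINMATCH above makes a 3-byte prefix comparable with a single integer compare: read 4 bytes, then shift the byte that must not participate out of the word. A standalone little-endian sketch of the same trick; readPrefix is a hypothetical name, and the big-endian variant shifts right instead: */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
static uint32_t readPrefix(const void* p, int length)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));             /* unaligned 4-byte read */
    return (length == 3) ? (v << 8) : v;  /* drop the 4th byte (little-endian host) */
}
int main(void)
{
    const char a[] = "abcX", b[] = "abcY";
    printf("%d\n", readPrefix(a, 3) == readPrefix(b, 3));   /* 1 : first 3 bytes agree */
    printf("%d\n", readPrefix(a, 4) == readPrefix(b, 4));   /* 0 : 4th byte differs    */
    return 0;
}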
not within extDict) */ -static -U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip) -{ - U32* const hashTable3 = zc->hashTable3; - U32 const hashLog3 = zc->hashLog3; - const BYTE* const base = zc->base; - U32 idx = zc->nextToUpdate3; - const U32 target = zc->nextToUpdate3 = (U32)(ip - base); - const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); - - while(idx < target) { - hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; - idx++; - } - - return hashTable3[hash3]; -} - - -/*-************************************* -* Binary Tree search -***************************************/ -static U32 ZSTD_insertBtAndGetAllMatches ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iLimit, - U32 nbCompares, const U32 mls, - U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) -{ - const BYTE* const base = zc->base; - const U32 current = (U32)(ip-base); - const U32 hashLog = zc->appliedParams.cParams.hashLog; - const size_t h = ZSTD_hashPtr(ip, hashLog, mls); - U32* const hashTable = zc->hashTable; - U32 matchIndex = hashTable[h]; - U32* const bt = zc->chainTable; - const U32 btLog = zc->appliedParams.cParams.chainLog - 1; - const U32 btMask= (1U << btLog) - 1; - size_t commonLengthSmaller=0, commonLengthLarger=0; - const BYTE* const dictBase = zc->dictBase; - const U32 dictLimit = zc->dictLimit; - const BYTE* const dictEnd = dictBase + dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const U32 btLow = btMask >= current ? 0 : current - btMask; - const U32 windowLow = zc->lowLimit; - U32* smallerPtr = bt + 2*(current&btMask); - U32* largerPtr = bt + 2*(current&btMask) + 1; - U32 matchEndIdx = current+8; - U32 dummy32; /* to be nullified at the end */ - U32 mnum = 0; - - const U32 minMatch = (mls == 3) ? 3 : 4; - size_t bestLength = minMatchLen-1; - - if (minMatch == 3) { /* HC3 match finder */ - U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); - if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { - const BYTE* match; - size_t currentMl=0; - if ((!extDict) || matchIndex3 >= dictLimit) { - match = base + matchIndex3; - if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); - } else { - match = dictBase + matchIndex3; - if (ZSTD_readMINMATCH(match, MINMATCH) == ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ - currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; - } - - /* save best solution */ - if (currentMl > bestLength) { - bestLength = currentMl; - matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; - matches[mnum].len = (U32)currentMl; - mnum++; - if (currentMl > ZSTD_OPT_NUM) goto update; - if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ - } - } - } - - hashTable[h] = current; /* Update Hash Table */ - - while (nbCompares-- && (matchIndex > windowLow)) { - U32* nextPtr = bt + 2*(matchIndex & btMask); - size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ - const BYTE* match; - - if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { - match = base + matchIndex; - if (match[matchLength] == ip[matchLength]) { - matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1; - } - } else { - match = dictBase + matchIndex; - matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); - if (matchIndex+matchLength >= dictLimit) - 
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ - } - - if (matchLength > bestLength) { - if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; - bestLength = matchLength; - matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; - matches[mnum].len = (U32)matchLength; - mnum++; - if (matchLength > ZSTD_OPT_NUM) break; - if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */ - break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - - if (match[matchLength] < ip[matchLength]) { - /* match is smaller than current */ - *smallerPtr = matchIndex; /* update smaller idx */ - commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ - if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ - matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ - } else { - /* match is larger than current */ - *largerPtr = matchIndex; - commonLengthLarger = matchLength; - if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ - largerPtr = nextPtr; - matchIndex = nextPtr[0]; - } } - - *smallerPtr = *largerPtr = 0; - -update: - zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; - return mnum; -} - - -/** Tree updater, providing best match */ -static U32 ZSTD_BtGetAllMatches ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iLimit, - const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) -{ - if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); -} - - -static U32 ZSTD_BtGetAllMatches_selectMLS ( - ZSTD_CCtx* zc, /* Index table will be updated */ - const BYTE* ip, const BYTE* const iHighLimit, - const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) -{ - switch(matchLengthSearch) - { - case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); - default : - case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); - case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); - case 7 : - case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); - } -} - -/** Tree updater, providing best match */ -static U32 ZSTD_BtGetAllMatches_extDict ( - ZSTD_CCtx* zc, - const BYTE* const ip, const BYTE* const iLimit, - const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) -{ - if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); - return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); -} - - -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict ( - ZSTD_CCtx* zc, /* Index table will be updated */ - const BYTE* ip, const BYTE* const iHighLimit, - const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) -{ - switch(matchLengthSearch) - { - case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); - default : - case 4 : 
return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); - case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); - case 7 : - case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); - } -} - - -/*-******************************* -* Optimal parser -*********************************/ -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, const int ultra) -{ - seqStore_t* seqStorePtr = &(ctx->seqStore); - optState_t* optStatePtr = &(ctx->optState); - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ctx->base; - const BYTE* const prefixStart = base + ctx->dictLimit; - - const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog; - const U32 sufficient_len = ctx->appliedParams.cParams.targetLength; - const U32 mls = ctx->appliedParams.cParams.searchLength; - const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 3 : 4; - - ZSTD_optimal_t* opt = optStatePtr->priceTable; - ZSTD_match_t* matches = optStatePtr->matchTable; - const BYTE* inr; - U32 offset, rep[ZSTD_REP_NUM]; - - /* init */ - ctx->nextToUpdate3 = ctx->nextToUpdate; - ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize); - ip += (ip==prefixStart); - { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; } - - /* Match Loop */ - while (ip < ilimit) { - U32 cur, match_num, last_pos, litlen, price; - U32 u, mlen, best_mlen, best_off, litLength; - memset(opt, 0, sizeof(ZSTD_optimal_t)); - last_pos = 0; - litlen = (U32)(ip - anchor); - - /* check repCode */ - { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); - for (i=(ip == anchor); i<last_i; i++) { - const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; - if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart)) - && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) { - mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch; - if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; - goto _storeSequence; - } - best_off = i - (ip == anchor); - do { - price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ - mlen--; - } while (mlen >= minMatch); - } } } - - match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); - - if (!last_pos && !match_num) { ip++; continue; } - - if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - - /* set prices using matches at position = 0 */ - best_mlen = (last_pos) ? last_pos : minMatch; - for (u = 0; u < match_num; u++) { - mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; - best_mlen = matches[u].len; - while (mlen <= best_mlen) { - price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ - mlen++; - } } - - if (last_pos < minMatch) { ip++; continue; } - - /* initialize opt[0] */ - { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } - opt[0].mlen = 1; - litlen = (U32)(ip - anchor); - - /* check further positions */ - for (cur = 1; cur <= last_pos; cur++) { - inr = ip + cur; - - if (opt[cur-1].mlen == 1) { - litlen = opt[cur-1].litlen + 1; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen); - } else - price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor); - } else { - litlen = 1; - price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1); - } - - if (cur > last_pos || price <= opt[cur].price) - SET_PRICE(cur, 1, 0, litlen, price); - - if (cur == last_pos) break; - - if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ - continue; - - mlen = opt[cur].mlen; - if (opt[cur].off > ZSTD_REP_MOVE_OPT) { - opt[cur].rep[2] = opt[cur-mlen].rep[1]; - opt[cur].rep[1] = opt[cur-mlen].rep[0]; - opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; - } else { - opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; - opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; - opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); - } - - best_mlen = minMatch; - { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); - for (i=(opt[cur].mlen != 1); i<last_i; i++) { - const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; - if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart)) - && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) { - mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch; - - if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; best_off = i; last_pos = cur + 1; - goto _storeSequence; - } - - best_off = i - (opt[cur].mlen != 1); - if (mlen > best_mlen) best_mlen = mlen; - - do { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); - } else - price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || price <= opt[cur + mlen].price) - SET_PRICE(cur + mlen, mlen, i, litlen, price); - mlen--; - } while (mlen >= minMatch); - } } } - - match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); - - if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - last_pos = cur + 1; - goto _storeSequence; - } - - /* set prices using matches at position = cur */ - for (u = 0; u < match_num; u++) { - mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; - best_mlen = matches[u].len; - - while (mlen <= best_mlen) { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) - price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); - else - price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) - SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); - - mlen++; - } } } - - best_mlen = opt[last_pos].mlen; - best_off = opt[last_pos].off; - cur = last_pos - best_mlen; - - /* store sequence */ -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ - opt[0].mlen = 1; - - while (1) { - mlen = opt[cur].mlen; - offset = opt[cur].off; - opt[cur].mlen = best_mlen; - opt[cur].off = best_off; - best_mlen = mlen; - best_off = offset; - if (mlen > cur) break; - cur -= mlen; - } - - for (u = 0; u <= last_pos;) { - u += opt[u].mlen; - } - - for (cur=0; cur < last_pos; ) { - mlen = opt[cur].mlen; - if (mlen == 1) { ip++; cur++; continue; } - offset = opt[cur].off; - cur += mlen; - litLength = (U32)(ip - anchor); - - if (offset > ZSTD_REP_MOVE_OPT) { - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = offset - ZSTD_REP_MOVE_OPT; - offset--; - } else { - if (offset != 0) { - best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); - if (offset != 1) rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = best_off; - } - if (litLength==0) offset--; - } - - ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH); - ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); - anchor = ip = ip + mlen; - } } /* for (cur=0; cur < last_pos; ) */ - - /* Save reps for next block */ - { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } - - /* Last Literals */ - { size_t const lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - - -FORCE_INLINE_TEMPLATE -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, - const void* src, size_t srcSize, const int ultra) -{ - seqStore_t* seqStorePtr = &(ctx->seqStore); - optState_t* optStatePtr = &(ctx->optState); - const BYTE* const istart = (const BYTE*)src; - const BYTE* ip = istart; - const BYTE* anchor = istart; - const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = iend - 8; - const BYTE* const base = ctx->base; - const U32 lowestIndex = ctx->lowLimit; - const U32 dictLimit = ctx->dictLimit; - const BYTE* const prefixStart = base + dictLimit; - const BYTE* const dictBase = ctx->dictBase; - const BYTE* const dictEnd = dictBase + dictLimit; - - const U32 maxSearches = 1U << ctx->appliedParams.cParams.searchLog; - const U32 sufficient_len = ctx->appliedParams.cParams.targetLength; - const U32 mls = ctx->appliedParams.cParams.searchLength; - const U32 minMatch = (ctx->appliedParams.cParams.searchLength == 3) ? 
3 : 4; - - ZSTD_optimal_t* opt = optStatePtr->priceTable; - ZSTD_match_t* matches = optStatePtr->matchTable; - const BYTE* inr; - - /* init */ - U32 offset, rep[ZSTD_REP_NUM]; - { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; } - - ctx->nextToUpdate3 = ctx->nextToUpdate; - ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize); - ip += (ip==prefixStart); - - /* Match Loop */ - while (ip < ilimit) { - U32 cur, match_num, last_pos, litlen, price; - U32 u, mlen, best_mlen, best_off, litLength; - U32 current = (U32)(ip-base); - memset(opt, 0, sizeof(ZSTD_optimal_t)); - last_pos = 0; - opt[0].litlen = (U32)(ip - anchor); - - /* check repCode */ - { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); - for (i = (ip==anchor); i<last_i; i++) { - const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; - const U32 repIndex = (U32)(current - repCur); - const BYTE* const repMatch = (repIndex < dictLimit) ? dictBase + repIndex : base + repIndex; - if ( (repCur > 0 && repCur <= (S32)current) - && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ - && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { - /* repcode detected we should take it */ - const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; - mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; - - if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; - goto _storeSequence; - } - - best_off = i - (ip==anchor); - litlen = opt[0].litlen; - do { - price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ - mlen--; - } while (mlen >= minMatch); - } } } - - match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); - - if (!last_pos && !match_num) { ip++; continue; } - - { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } - opt[0].mlen = 1; - - if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - cur = 0; - last_pos = 1; - goto _storeSequence; - } - - best_mlen = (last_pos) ? last_pos : minMatch; - - /* set prices using matches at position = 0 */ - for (u = 0; u < match_num; u++) { - mlen = (u>0) ? matches[u-1].len+1 : best_mlen; - best_mlen = matches[u].len; - litlen = opt[0].litlen; - while (mlen <= best_mlen) { - price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); - if (mlen > last_pos || price < opt[mlen].price) - SET_PRICE(mlen, mlen, matches[u].off, litlen, price); - mlen++; - } } - - if (last_pos < minMatch) { - ip++; continue; - } - - /* check further positions */ - for (cur = 1; cur <= last_pos; cur++) { - inr = ip + cur; - - if (opt[cur-1].mlen == 1) { - litlen = opt[cur-1].litlen + 1; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-litlen); - } else - price = ZSTD_getLiteralPrice(optStatePtr, litlen, anchor); - } else { - litlen = 1; - price = opt[cur - 1].price + ZSTD_getLiteralPrice(optStatePtr, litlen, inr-1); - } - - if (cur > last_pos || price <= opt[cur].price) - SET_PRICE(cur, 1, 0, litlen, price); - - if (cur == last_pos) break; - - if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ - continue; - - mlen = opt[cur].mlen; - if (opt[cur].off > ZSTD_REP_MOVE_OPT) { - opt[cur].rep[2] = opt[cur-mlen].rep[1]; - opt[cur].rep[1] = opt[cur-mlen].rep[0]; - opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; - } else { - opt[cur].rep[2] = (opt[cur].off > 1) ? 
opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; - opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; - opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); - } - - best_mlen = minMatch; - { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); - for (i = (mlen != 1); i<last_i; i++) { - const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; - const U32 repIndex = (U32)(current+cur - repCur); - const BYTE* const repMatch = (repIndex < dictLimit) ? dictBase + repIndex : base + repIndex; - if ( (repCur > 0 && repCur <= (S32)(current+cur)) - && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ - && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { - /* repcode detected */ - const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; - mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; - - if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { - best_mlen = mlen; best_off = i; last_pos = cur + 1; - goto _storeSequence; - } - - best_off = i - (opt[cur].mlen != 1); - if (mlen > best_mlen) best_mlen = mlen; - - do { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) { - price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); - } else - price = ZSTD_getPrice(optStatePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || price <= opt[cur + mlen].price) - SET_PRICE(cur + mlen, mlen, i, litlen, price); - mlen--; - } while (mlen >= minMatch); - } } } - - match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); - - if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { - best_mlen = matches[match_num-1].len; - best_off = matches[match_num-1].off; - last_pos = cur + 1; - goto _storeSequence; - } - - /* set prices using matches at position = cur */ - for (u = 0; u < match_num; u++) { - mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; - best_mlen = matches[u].len; - - while (mlen <= best_mlen) { - if (opt[cur].mlen == 1) { - litlen = opt[cur].litlen; - if (cur > litlen) - price = opt[cur - litlen].price + ZSTD_getPrice(optStatePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); - else - price = ZSTD_getPrice(optStatePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); - } else { - litlen = 0; - price = opt[cur].price + ZSTD_getPrice(optStatePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); - } - - if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) - SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); - - mlen++; - } } } /* for (cur = 1; cur <= last_pos; cur++) */ - - best_mlen = opt[last_pos].mlen; - best_off = opt[last_pos].off; - cur = last_pos - best_mlen; - - /* store sequence */ -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ - opt[0].mlen = 1; - - while (1) { - mlen = opt[cur].mlen; - offset = opt[cur].off; - opt[cur].mlen = best_mlen; - opt[cur].off = best_off; - best_mlen = mlen; - best_off = offset; - if (mlen > cur) break; - cur -= mlen; - } - - for (u = 0; u <= last_pos; ) { - u += opt[u].mlen; - } - - for (cur=0; cur < last_pos; ) { - mlen = opt[cur].mlen; - if (mlen == 1) { ip++; cur++; continue; } - offset = opt[cur].off; - cur += mlen; - litLength = (U32)(ip - anchor); - - if (offset > ZSTD_REP_MOVE_OPT) { - rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = offset - ZSTD_REP_MOVE_OPT; - offset--; - } else { - if (offset != 0) { - best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); - if (offset != 1) rep[2] = rep[1]; - rep[1] = rep[0]; - rep[0] = best_off; - } - - if (litLength==0) offset--; - } - - ZSTD_updatePrice(optStatePtr, litLength, anchor, offset, mlen-MINMATCH); - ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); - anchor = ip = ip + mlen; - } } /* for (cur=0; cur < last_pos; ) */ - - /* Save reps for next block */ - { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } - - /* Last Literals */ - { size_t lastLLSize = iend - anchor; - memcpy(seqStorePtr->lit, anchor, lastLLSize); - seqStorePtr->lit += lastLLSize; - } -} - -#endif /* ZSTD_OPT_H_91842398743 */ +#endif /* ZSTD_OPT_H */ Index: vendor/zstd/dist/lib/compress/zstdmt_compress.c =================================================================== --- vendor/zstd/dist/lib/compress/zstdmt_compress.c (revision 325596) +++ vendor/zstd/dist/lib/compress/zstdmt_compress.c (revision 325597) @@ -1,1022 +1,1099 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ /* ====== Tuning parameters ====== */ -#define ZSTDMT_NBTHREADS_MAX 256 +#define ZSTDMT_NBTHREADS_MAX 200 #define ZSTDMT_OVERLAPLOG_DEFAULT 6 /* ====== Compiler specifics ====== */ #if defined(_MSC_VER) # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ #endif /* ====== Dependencies ====== */ #include <string.h> /* memcpy, memset */ #include "pool.h" /* threadpool */ #include "threading.h" /* mutex */ #include "zstd_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ #include "zstdmt_compress.h" /* ====== Debug ====== */ #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2) # include <stdio.h> # include <unistd.h> # include <sys/times.h> # define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); } # define DEBUG_PRINTHEX(l,p,n) { \ unsigned debug_u; \ for (debug_u=0; debug_u<(n); debug_u++) \ DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ DEBUGLOGRAW(l, " \n"); \ } static unsigned long long GetCurrentClockTimeMicroseconds(void) { static clock_t _ticksPerSecond = 0; if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); { struct tms junk; clock_t newTicks = (clock_t) times(&junk); return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); } } #define MUTEX_WAIT_TIME_DLEVEL 6 -#define PTHREAD_MUTEX_LOCK(mutex) { \ - if (ZSTD_DEBUG>=MUTEX_WAIT_TIME_DLEVEL) { \ +#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ + if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) { \ unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ - pthread_mutex_lock(mutex); \ + ZSTD_pthread_mutex_lock(mutex); \ { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ unsigned long long const elapsedTime = (afterTime-beforeTime); \ if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ elapsedTime, #mutex); \ } } \ - } else pthread_mutex_lock(mutex); \ + } else { \ + ZSTD_pthread_mutex_lock(mutex); \ + } \ } #else -# define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m) +# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) # define DEBUG_PRINTHEX(l,p,n) {} #endif /* ===== Buffer Pool ===== */ /* a single Buffer Pool can be invoked from multiple threads in parallel */ typedef struct buffer_s { void* start; size_t size; } buffer_t; static const buffer_t g_nullBuffer = { NULL, 0 }; typedef struct ZSTDMT_bufferPool_s { - pthread_mutex_t poolMutex; + ZSTD_pthread_mutex_t poolMutex; size_t bufferSize; unsigned totalBuffers; unsigned nbBuffers; ZSTD_customMem cMem; buffer_t bTable[1]; /* variable size */ } ZSTDMT_bufferPool; static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads, ZSTD_customMem cMem) { unsigned const maxNbBuffers = 2*nbThreads + 3; ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc( sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); if (bufPool==NULL) return NULL; - if (pthread_mutex_init(&bufPool->poolMutex, NULL)) { + if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { ZSTD_free(bufPool, cMem); return NULL; } bufPool->bufferSize = 64 KB; bufPool->totalBuffers = maxNbBuffers; bufPool->nbBuffers = 0; bufPool->cMem = cMem; return bufPool; } static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) { unsigned u; + DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); if (!bufPool) return; /* compatibility with free on NULL */ - for (u=0; u<bufPool->totalBuffers; u++) + for (u=0; u<bufPool->totalBuffers; u++) { + DEBUGLOG(4, "free buffer %2u 
(address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); ZSTD_free(bufPool->bTable[u].start, bufPool->cMem); - pthread_mutex_destroy(&bufPool->poolMutex); + } + ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); ZSTD_free(bufPool, bufPool->cMem); } /* only works at initialization, not during compression */ static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) { size_t const poolSize = sizeof(*bufPool) + (bufPool->totalBuffers - 1) * sizeof(buffer_t); unsigned u; size_t totalBufferSize = 0; - pthread_mutex_lock(&bufPool->poolMutex); + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); for (u=0; utotalBuffers; u++) totalBufferSize += bufPool->bTable[u].size; - pthread_mutex_unlock(&bufPool->poolMutex); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return poolSize + totalBufferSize; } static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* bufPool, size_t bSize) { bufPool->bufferSize = bSize; } /** ZSTDMT_getBuffer() : * assumption : bufPool must be valid */ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) { size_t const bSize = bufPool->bufferSize; DEBUGLOG(5, "ZSTDMT_getBuffer"); - pthread_mutex_lock(&bufPool->poolMutex); + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers) { /* try to use an existing buffer */ buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; size_t const availBufferSize = buf.size; + bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) { /* large enough, but not too much */ - pthread_mutex_unlock(&bufPool->poolMutex); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return buf; } /* size conditions not respected : scratch this buffer, create new one */ DEBUGLOG(5, "existing buffer does not meet size conditions => freeing"); ZSTD_free(buf.start, bufPool->cMem); } - pthread_mutex_unlock(&bufPool->poolMutex); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* create new buffer */ DEBUGLOG(5, "create a new buffer"); { buffer_t buffer; void* const start = ZSTD_malloc(bSize, bufPool->cMem); buffer.start = start; /* note : start can be NULL if malloc fails ! */ buffer.size = (start==NULL) ? 0 : bSize; return buffer; } } /* store buffer for later re-use, up to pool capacity */ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) { if (buf.start == NULL) return; /* compatible with release on NULL */ DEBUGLOG(5, "ZSTDMT_releaseBuffer"); - pthread_mutex_lock(&bufPool->poolMutex); + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); if (bufPool->nbBuffers < bufPool->totalBuffers) { bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ - pthread_mutex_unlock(&bufPool->poolMutex); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); return; } - pthread_mutex_unlock(&bufPool->poolMutex); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); /* Reached bufferPool capacity (should not happen) */ DEBUGLOG(5, "buffer pool capacity reached => freeing "); ZSTD_free(buf.start, bufPool->cMem); } +/* Sets parameters relevant to the compression job, initializing others to + * default values. Notably, nbThreads should probably be zero. 
*/ +static ZSTD_CCtx_params ZSTDMT_makeJobCCtxParams(ZSTD_CCtx_params const params) +{ + ZSTD_CCtx_params jobParams; + memset(&jobParams, 0, sizeof(jobParams)); + jobParams.cParams = params.cParams; + jobParams.fParams = params.fParams; + jobParams.compressionLevel = params.compressionLevel; + + jobParams.ldmParams = params.ldmParams; + return jobParams; +} + /* ===== CCtx Pool ===== */ /* a single CCtx Pool can be invoked from multiple threads in parallel */ typedef struct { - pthread_mutex_t poolMutex; + ZSTD_pthread_mutex_t poolMutex; unsigned totalCCtx; unsigned availCCtx; ZSTD_customMem cMem; ZSTD_CCtx* cctx[1]; /* variable size */ } ZSTDMT_CCtxPool; /* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) { unsigned u; for (u=0; u<pool->totalCCtx; u++) ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */ - pthread_mutex_destroy(&pool->poolMutex); + ZSTD_pthread_mutex_destroy(&pool->poolMutex); ZSTD_free(pool, pool->cMem); } /* ZSTDMT_createCCtxPool() : * implies nbThreads >= 1 , checked by caller ZSTDMT_createCCtx() */ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads, ZSTD_customMem cMem) { ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc( sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*), cMem); if (!cctxPool) return NULL; - if (pthread_mutex_init(&cctxPool->poolMutex, NULL)) { + if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { ZSTD_free(cctxPool, cMem); return NULL; } cctxPool->cMem = cMem; cctxPool->totalCCtx = nbThreads; cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } DEBUGLOG(3, "cctxPool created, with %u threads", nbThreads); return cctxPool; } /* only works during initialization phase, not during compression */ static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) { - pthread_mutex_lock(&cctxPool->poolMutex); + ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); { unsigned const nbThreads = cctxPool->totalCCtx; size_t const poolSize = sizeof(*cctxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*); unsigned u; size_t totalCCtxSize = 0; for (u=0; u<nbThreads; u++) { totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]); } - pthread_mutex_unlock(&cctxPool->poolMutex); + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return poolSize + totalCCtxSize; } } static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) { DEBUGLOG(5, "ZSTDMT_getCCtx"); - pthread_mutex_lock(&cctxPool->poolMutex); + ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); if (cctxPool->availCCtx) { cctxPool->availCCtx--; { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; - pthread_mutex_unlock(&cctxPool->poolMutex); + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); return cctx; } } - pthread_mutex_unlock(&cctxPool->poolMutex); + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); DEBUGLOG(5, "create one more CCtx"); return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! 
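/* Borrow/use/release is the expected discipline around this pool: every ZSTDMT_getCCtx() must be paired with a ZSTDMT_releaseCCtx(), including on error paths, or the pool permanently loses capacity. A sketch of that pattern under the assumption that the borrowed context is driven through the public one-shot API; not the worker's actual code, which uses the streaming begin/continue calls: */
static size_t compressOneJob(ZSTDMT_CCtxPool* cctxPool,
                             void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(cctxPool);
    if (cctx == NULL) return ERROR(memory_allocation);
    {   size_t const result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3);
        ZSTDMT_releaseCCtx(cctxPool, cctx);   /* always hand the context back */
        return result;
    }
}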
*/ } static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) { if (cctx==NULL) return; /* compatibility with release on NULL */ - pthread_mutex_lock(&pool->poolMutex); + ZSTD_pthread_mutex_lock(&pool->poolMutex); if (pool->availCCtx < pool->totalCCtx) pool->cctx[pool->availCCtx++] = cctx; else { /* pool overflow : should not happen, since totalCCtx==nbThreads */ DEBUGLOG(5, "CCtx pool overflow : free cctx"); ZSTD_freeCCtx(cctx); } - pthread_mutex_unlock(&pool->poolMutex); + ZSTD_pthread_mutex_unlock(&pool->poolMutex); } /* ===== Thread worker ===== */ typedef struct { buffer_t src; const void* srcStart; size_t dictSize; size_t srcSize; buffer_t dstBuff; size_t cSize; size_t dstFlushed; unsigned firstChunk; unsigned lastChunk; unsigned jobCompleted; unsigned jobScanned; - pthread_mutex_t* jobCompleted_mutex; - pthread_cond_t* jobCompleted_cond; - ZSTD_parameters params; + ZSTD_pthread_mutex_t* jobCompleted_mutex; + ZSTD_pthread_cond_t* jobCompleted_cond; + ZSTD_CCtx_params params; const ZSTD_CDict* cdict; ZSTDMT_CCtxPool* cctxPool; ZSTDMT_bufferPool* bufPool; unsigned long long fullFrameSize; } ZSTDMT_jobDescription; /* ZSTDMT_compressChunk() : POOL_function type */ void ZSTDMT_compressChunk(void* jobDescription) { ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; ZSTD_CCtx* cctx = ZSTDMT_getCCtx(job->cctxPool); const void* const src = (const char*)job->srcStart + job->dictSize; buffer_t dstBuff = job->dstBuff; DEBUGLOG(5, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize); if (cctx==NULL) { job->cSize = ERROR(memory_allocation); goto _endJob; } if (dstBuff.start == NULL) { dstBuff = ZSTDMT_getBuffer(job->bufPool); if (dstBuff.start==NULL) { job->cSize = ERROR(memory_allocation); goto _endJob; } job->dstBuff = dstBuff; } if (job->cdict) { /* should only happen for first segment */ size_t const initError = ZSTD_compressBegin_usingCDict_advanced(cctx, job->cdict, job->params.fParams, job->fullFrameSize); DEBUGLOG(5, "using CDict"); if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; } } else { /* srcStart points at reloaded section */ if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */ - { size_t const dictModeError = ZSTD_setCCtxParameter(cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */ - size_t const initError = ZSTD_compressBegin_advanced(cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize); - if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; } - ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1); + { ZSTD_CCtx_params jobParams = job->params; + size_t const forceWindowError = + ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstChunk); + /* Force loading dictionary in "content-only" mode (no header analysis) */ + size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, job->srcStart, job->dictSize, ZSTD_dm_rawContent, jobParams, job->fullFrameSize); + if (ZSTD_isError(initError) || ZSTD_isError(forceWindowError)) { + job->cSize = initError; + goto _endJob; + } } } if (!job->firstChunk) { /* flush and overwrite frame header when it's not first segment */ size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, 0); if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; } ZSTD_invalidateRepCodes(cctx); } DEBUGLOG(5, "Compressing : "); 
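    /* note : only the last chunk seals the frame with ZSTD_compressEnd() ;
     * every previous chunk goes through ZSTD_compressContinue(), so the
     * concatenation of all compressed sections forms a single valid frame */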
DEBUG_PRINTHEX(4, job->srcStart, 12); job->cSize = (job->lastChunk) ? ZSTD_compressEnd (cctx, dstBuff.start, dstBuff.size, src, job->srcSize) : ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.size, src, job->srcSize); DEBUGLOG(5, "compressed %u bytes into %u bytes (first:%u) (last:%u)", (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk); DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize)); _endJob: ZSTDMT_releaseCCtx(job->cctxPool, cctx); ZSTDMT_releaseBuffer(job->bufPool, job->src); job->src = g_nullBuffer; job->srcStart = NULL; - PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex); + ZSTD_PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex); job->jobCompleted = 1; job->jobScanned = 0; - pthread_cond_signal(job->jobCompleted_cond); - pthread_mutex_unlock(job->jobCompleted_mutex); + ZSTD_pthread_cond_signal(job->jobCompleted_cond); + ZSTD_pthread_mutex_unlock(job->jobCompleted_mutex); } /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ typedef struct { buffer_t buffer; size_t filled; } inBuff_t; struct ZSTDMT_CCtx_s { POOL_ctx* factory; ZSTDMT_jobDescription* jobs; ZSTDMT_bufferPool* bufPool; ZSTDMT_CCtxPool* cctxPool; - pthread_mutex_t jobCompleted_mutex; - pthread_cond_t jobCompleted_cond; + ZSTD_pthread_mutex_t jobCompleted_mutex; + ZSTD_pthread_cond_t jobCompleted_cond; size_t targetSectionSize; size_t inBuffSize; size_t dictSize; size_t targetDictSize; inBuff_t inBuff; - ZSTD_parameters params; + ZSTD_CCtx_params params; XXH64_state_t xxhState; - unsigned nbThreads; unsigned jobIDMask; unsigned doneJobID; unsigned nextJobID; unsigned frameEnded; unsigned allJobsCompleted; - unsigned overlapLog; unsigned long long frameContentSize; - size_t sectionSize; ZSTD_customMem cMem; ZSTD_CDict* cdictLocal; const ZSTD_CDict* cdict; }; static ZSTDMT_jobDescription* ZSTDMT_allocJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem) { U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; U32 const nbJobs = 1 << nbJobsLog2; *nbJobsPtr = nbJobs; return (ZSTDMT_jobDescription*) ZSTD_calloc( nbJobs * sizeof(ZSTDMT_jobDescription), cMem); } +/* Internal only */ +size_t ZSTDMT_initializeCCtxParameters(ZSTD_CCtx_params* params, unsigned nbThreads) +{ + params->nbThreads = nbThreads; + params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT; + params->jobSize = 0; + return 0; +} + ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem) { ZSTDMT_CCtx* mtctx; U32 nbJobs = nbThreads + 2; DEBUGLOG(3, "ZSTDMT_createCCtx_advanced"); if (nbThreads < 1) return NULL; nbThreads = MIN(nbThreads , ZSTDMT_NBTHREADS_MAX); if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) /* invalid custom allocator */ return NULL; mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem); if (!mtctx) return NULL; + ZSTDMT_initializeCCtxParameters(&mtctx->params, nbThreads); mtctx->cMem = cMem; - mtctx->nbThreads = nbThreads; mtctx->allJobsCompleted = 1; - mtctx->sectionSize = 0; - mtctx->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT; - mtctx->factory = POOL_create(nbThreads, 0); + mtctx->factory = POOL_create_advanced(nbThreads, 0, cMem); mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, cMem); mtctx->jobIDMask = nbJobs - 1; mtctx->bufPool = ZSTDMT_createBufferPool(nbThreads, cMem); mtctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads, cMem); if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool) { ZSTDMT_freeCCtx(mtctx); return NULL; } - if 
(pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) { + if (ZSTD_pthread_mutex_init(&mtctx->jobCompleted_mutex, NULL)) { ZSTDMT_freeCCtx(mtctx); return NULL; } - if (pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) { + if (ZSTD_pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) { ZSTDMT_freeCCtx(mtctx); return NULL; } DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads); return mtctx; } ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads) { return ZSTDMT_createCCtx_advanced(nbThreads, ZSTD_defaultCMem); } /* ZSTDMT_releaseAllJobResources() : * note : ensure all workers are killed first ! */ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) { unsigned jobID; DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); mtctx->jobs[jobID].dstBuff = g_nullBuffer; + DEBUGLOG(4, "job%02u: release src address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].src.start); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].src); mtctx->jobs[jobID].src = g_nullBuffer; } memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription)); + DEBUGLOG(4, "input: release address %08X", (U32)(size_t)mtctx->inBuff.buffer.start); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer); mtctx->inBuff.buffer = g_nullBuffer; mtctx->allJobsCompleted = 1; } +static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) +{ + DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); + while (zcs->doneJobID < zcs->nextJobID) { + unsigned const jobID = zcs->doneJobID & zcs->jobIDMask; + ZSTD_PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + while (zcs->jobs[jobID].jobCompleted==0) { + DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */ + ZSTD_pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); + } + ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex); + zcs->doneJobID++; + } +} + size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) { if (mtctx==NULL) return 0; /* compatible with free on NULL */ - POOL_free(mtctx->factory); - if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */ - ZSTDMT_freeBufferPool(mtctx->bufPool); /* release job resources into pools first */ + POOL_free(mtctx->factory); /* stop and free worker threads */ + ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ ZSTD_free(mtctx->jobs, mtctx->cMem); + ZSTDMT_freeBufferPool(mtctx->bufPool); ZSTDMT_freeCCtxPool(mtctx->cctxPool); ZSTD_freeCDict(mtctx->cdictLocal); - pthread_mutex_destroy(&mtctx->jobCompleted_mutex); - pthread_cond_destroy(&mtctx->jobCompleted_cond); + ZSTD_pthread_mutex_destroy(&mtctx->jobCompleted_mutex); + ZSTD_pthread_cond_destroy(&mtctx->jobCompleted_cond); ZSTD_free(mtctx, mtctx->cMem); return 0; } size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) { if (mtctx == NULL) return 0; /* supports sizeof NULL */ return sizeof(*mtctx) + POOL_sizeof(mtctx->factory) + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + ZSTD_sizeof_CDict(mtctx->cdictLocal); } -size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value) -{ +/* Internal only */ +size_t ZSTDMT_CCtxParam_setMTCtxParameter( + ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, 
unsigned value) { switch(parameter) { case ZSTDMT_p_sectionSize : - mtctx->sectionSize = value; + params->jobSize = value; return 0; case ZSTDMT_p_overlapSectionLog : - DEBUGLOG(5, "ZSTDMT_p_overlapSectionLog : %u", value); - mtctx->overlapLog = (value >= 9) ? 9 : value; + DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value); + params->overlapSizeLog = (value >= 9) ? 9 : value; return 0; default : return ERROR(parameter_unsupported); } } +size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value) +{ + switch(parameter) + { + case ZSTDMT_p_sectionSize : + return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); + case ZSTDMT_p_overlapSectionLog : + return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); + default : + return ERROR(parameter_unsupported); + } +} /* ------------------------------------------ */ /* ===== Multi-threaded compression ===== */ /* ------------------------------------------ */ static unsigned computeNbChunks(size_t srcSize, unsigned windowLog, unsigned nbThreads) { size_t const chunkSizeTarget = (size_t)1 << (windowLog + 2); size_t const chunkMaxSize = chunkSizeTarget << 2; size_t const passSizeMax = chunkMaxSize * nbThreads; unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1; unsigned const nbChunksLarge = multiplier * nbThreads; unsigned const nbChunksMax = (unsigned)(srcSize / chunkSizeTarget) + 1; unsigned const nbChunksSmall = MIN(nbChunksMax, nbThreads); return (multiplier>1) ? nbChunksLarge : nbChunksSmall; } - -size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_parameters const params, - unsigned overlapLog) +static size_t ZSTDMT_compress_advanced_internal( + ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params const params) { - unsigned const overlapRLog = (overlapLog>9) ? 0 : 9-overlapLog; + ZSTD_CCtx_params const jobParams = ZSTDMT_makeJobCCtxParams(params); + unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog; size_t const overlapSize = (overlapRLog>=9) ? 0 : (size_t)1 << (params.cParams.windowLog - overlapRLog); - unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, mtctx->nbThreads); + unsigned nbChunks = computeNbChunks(srcSize, params.cParams.windowLog, params.nbThreads); size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks; size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */ const char* const srcStart = (const char*)src; size_t remainingSrcSize = srcSize; unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? 
nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */ size_t frameStartPos = 0, dstBufferPos = 0; XXH64_state_t xxh64; + assert(jobParams.nbThreads == 0); + assert(mtctx->cctxPool->totalCCtx == params.nbThreads); DEBUGLOG(4, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize); if (nbChunks==1) { /* fallback to single-thread mode */ ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; - if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, params.fParams); - return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params); + if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams); + return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams); } assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */ ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) ); XXH64_reset(&xxh64, 0); if (nbChunks > mtctx->jobIDMask+1) { /* enlarge job table */ U32 nbJobs = nbChunks; ZSTD_free(mtctx->jobs, mtctx->cMem); mtctx->jobIDMask = 0; mtctx->jobs = ZSTDMT_allocJobsTable(&nbJobs, mtctx->cMem); if (mtctx->jobs==NULL) return ERROR(memory_allocation); mtctx->jobIDMask = nbJobs - 1; } { unsigned u; for (u=0; u<nbChunks; u++) { size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize); size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize); buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity }; buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer; size_t dictSize = u ? overlapSize : 0; mtctx->jobs[u].src = g_nullBuffer; mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize; mtctx->jobs[u].dictSize = dictSize; mtctx->jobs[u].srcSize = chunkSize; mtctx->jobs[u].cdict = mtctx->nextJobID==0 ? cdict : NULL; mtctx->jobs[u].fullFrameSize = srcSize; - mtctx->jobs[u].params = params; + mtctx->jobs[u].params = jobParams; /* do not calculate checksum within sections, but write it in header for first section */ if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0; mtctx->jobs[u].dstBuff = dstBuffer; mtctx->jobs[u].cctxPool = mtctx->cctxPool; mtctx->jobs[u].bufPool = mtctx->bufPool; mtctx->jobs[u].firstChunk = (u==0); mtctx->jobs[u].lastChunk = (u==nbChunks-1); mtctx->jobs[u].jobCompleted = 0; mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex; mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond; if (params.fParams.checksumFlag) { XXH64_update(&xxh64, srcStart + frameStartPos, chunkSize); } DEBUGLOG(5, "posting job %u (%u bytes)", u, (U32)chunkSize); DEBUG_PRINTHEX(6, mtctx->jobs[u].srcStart, 12); POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]); frameStartPos += chunkSize; dstBufferPos += dstBufferCapacity; remainingSrcSize -= chunkSize; } } /* collect result */ { size_t error = 0, dstPos = 0; unsigned chunkID; for (chunkID=0; chunkID<nbChunks; chunkID++) { DEBUGLOG(5, "waiting for chunk %u ", chunkID); - PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex); + ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex); while (mtctx->jobs[chunkID].jobCompleted==0) { DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", chunkID); - pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex); + ZSTD_pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex); } - pthread_mutex_unlock(&mtctx->jobCompleted_mutex); + ZSTD_pthread_mutex_unlock(&mtctx->jobCompleted_mutex); DEBUGLOG(5, "ready to write chunk %u ", chunkID); mtctx->jobs[chunkID].srcStart = NULL; { size_t const cSize = mtctx->jobs[chunkID].cSize; if (ZSTD_isError(cSize)) error = cSize; if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); if (chunkID) { /* note : chunk 0 is
written directly at dst, which is correct position */ if (!error) memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap when chunk compressed within dst */ if (chunkID >= compressWithinDst) { /* chunk compressed into its own buffer, which must be released */ DEBUGLOG(5, "releasing buffer %u>=%u", chunkID, compressWithinDst); ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[chunkID].dstBuff); - } - mtctx->jobs[chunkID].dstBuff = g_nullBuffer; - } + } } + mtctx->jobs[chunkID].dstBuff = g_nullBuffer; dstPos += cSize ; } } /* for (chunkID=0; chunkID<nbChunks; chunkID++) */ if (params.fParams.checksumFlag) { U32 const checksum = (U32)XXH64_digest(&xxh64); if (dstPos + 4 > dstCapacity) { error = ERROR(dstSize_tooSmall); } else { DEBUGLOG(4, "writing checksum : %08X \n", checksum); MEM_writeLE32((char*)dst + dstPos, checksum); dstPos += 4; } } if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos); return error ? error : dstPos; } } +size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, + ZSTD_parameters const params, + unsigned overlapLog) +{ + ZSTD_CCtx_params cctxParams = mtctx->params; + cctxParams.cParams = params.cParams; + cctxParams.fParams = params.fParams; + cctxParams.overlapSizeLog = overlapLog; + return ZSTDMT_compress_advanced_internal(mtctx, + dst, dstCapacity, + src, srcSize, + cdict, cctxParams); +} + size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) { U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT; ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); params.fParams.contentSizeFlag = 1; return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog); } /* ====================================== */ /* ======= Streaming API ======= */ /* ====================================== */ -static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) +size_t ZSTDMT_initCStream_internal( + ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode, + const ZSTD_CDict* cdict, ZSTD_CCtx_params params, + unsigned long long pledgedSrcSize) { - DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); - while (zcs->doneJobID < zcs->nextJobID) { - unsigned const jobID = zcs->doneJobID & zcs->jobIDMask; - PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); - while (zcs->jobs[jobID].jobCompleted==0) { - DEBUGLOG(5, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */ - pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); - } - pthread_mutex_unlock(&zcs->jobCompleted_mutex); - zcs->doneJobID++; - } -} - - -/** ZSTDMT_initCStream_internal() : - * internal usage only */ -size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, - const void* dict, size_t dictSize, const ZSTD_CDict* cdict, - ZSTD_parameters params, unsigned long long pledgedSrcSize) -{ DEBUGLOG(4, "ZSTDMT_initCStream_internal"); /* params are supposed to be fully validated at this point */ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + assert(zcs->cctxPool->totalCCtx == params.nbThreads); - if (zcs->nbThreads==1) { + if (params.nbThreads==1) { + ZSTD_CCtx_params const singleThreadParams = ZSTDMT_makeJobCCtxParams(params); DEBUGLOG(4, "single thread mode"); + assert(singleThreadParams.nbThreads == 0); return ZSTD_initCStream_internal(zcs->cctxPool->cctx[0], - dict, dictSize, cdict, - params,
pledgedSrcSize); + dict, dictSize, cdict, + singleThreadParams, pledgedSrcSize); } if (zcs->allJobsCompleted == 0) { /* previous compression not correctly finished */ ZSTDMT_waitForAllJobsCompleted(zcs); ZSTDMT_releaseAllJobResources(zcs); zcs->allJobsCompleted = 1; } zcs->params = params; zcs->frameContentSize = pledgedSrcSize; if (dict) { DEBUGLOG(4,"cdictLocal: %08X", (U32)(size_t)zcs->cdictLocal); ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, - 0 /* byRef */, ZSTD_dm_auto, /* note : a loadPrefix becomes an internal CDict */ + ZSTD_dlm_byCopy, dictMode, /* note : a loadPrefix becomes an internal CDict */ params.cParams, zcs->cMem); zcs->cdict = zcs->cdictLocal; if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); } else { DEBUGLOG(4,"cdictLocal: %08X", (U32)(size_t)zcs->cdictLocal); ZSTD_freeCDict(zcs->cdictLocal); zcs->cdictLocal = NULL; zcs->cdict = cdict; } - zcs->targetDictSize = (zcs->overlapLog==0) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - (9 - zcs->overlapLog)); - DEBUGLOG(4, "overlapLog : %u ", zcs->overlapLog); + zcs->targetDictSize = (params.overlapSizeLog==0) ? 0 : (size_t)1 << (params.cParams.windowLog - (9 - params.overlapSizeLog)); + DEBUGLOG(4, "overlapLog : %u ", params.overlapSizeLog); DEBUGLOG(4, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10)); - zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2); + zcs->targetSectionSize = params.jobSize ? params.jobSize : (size_t)1 << (params.cParams.windowLog + 2); zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize); zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize); DEBUGLOG(4, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10)); zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize; ZSTDMT_setBufferSize(zcs->bufPool, MAX(zcs->inBuffSize, ZSTD_compressBound(zcs->targetSectionSize)) ); zcs->inBuff.buffer = g_nullBuffer; zcs->dictSize = 0; zcs->doneJobID = 0; zcs->nextJobID = 0; zcs->frameEnded = 0; zcs->allJobsCompleted = 0; if (params.fParams.checksumFlag) XXH64_reset(&zcs->xxhState, 0); return 0; } size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { + ZSTD_CCtx_params cctxParams = mtctx->params; DEBUGLOG(5, "ZSTDMT_initCStream_advanced"); - return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, NULL, params, pledgedSrcSize); + cctxParams.cParams = params.cParams; + cctxParams.fParams = params.fParams; + return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dm_auto, NULL, + cctxParams, pledgedSrcSize); } size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) { - ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict); + ZSTD_CCtx_params cctxParams = mtctx->params; + cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict); + cctxParams.fParams = fParams; if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */ - params.fParams = fParams; - return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, cdict, - params, pledgedSrcSize); + return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dm_auto, cdict, + cctxParams, pledgedSrcSize); } /* ZSTDMT_resetCStream() : * pledgedSrcSize is optional and can be zero == unknown */ size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long 
pledgedSrcSize) { - if (zcs->nbThreads==1) + if (zcs->params.nbThreads==1) return ZSTD_resetCStream(zcs->cctxPool->cctx[0], pledgedSrcSize); - return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize); + return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, 0, zcs->params, + pledgedSrcSize); } size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) { ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); - return ZSTDMT_initCStream_internal(zcs, NULL, 0, NULL, params, 0); + ZSTD_CCtx_params cctxParams = zcs->params; + cctxParams.cParams = params.cParams; + cctxParams.fParams = params.fParams; + return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, NULL, cctxParams, 0); } static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame) { unsigned const jobID = zcs->nextJobID & zcs->jobIDMask; DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize); zcs->jobs[jobID].src = zcs->inBuff.buffer; zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start; zcs->jobs[jobID].srcSize = srcSize; zcs->jobs[jobID].dictSize = zcs->dictSize; assert(zcs->inBuff.filled >= srcSize + zcs->dictSize); zcs->jobs[jobID].params = zcs->params; /* do not calculate checksum within sections, but write it in header for first section */ if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0; zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL; zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize; zcs->jobs[jobID].dstBuff = g_nullBuffer; zcs->jobs[jobID].cctxPool = zcs->cctxPool; zcs->jobs[jobID].bufPool = zcs->bufPool; zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0); zcs->jobs[jobID].lastChunk = endFrame; zcs->jobs[jobID].jobCompleted = 0; zcs->jobs[jobID].dstFlushed = 0; zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex; zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond; if (zcs->params.fParams.checksumFlag) XXH64_update(&zcs->xxhState, (const char*)zcs->inBuff.buffer.start + zcs->dictSize, srcSize); /* get a new buffer for next input */ if (!endFrame) { size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize); zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->bufPool); if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */ zcs->jobs[jobID].jobCompleted = 1; zcs->nextJobID++; ZSTDMT_waitForAllJobsCompleted(zcs); ZSTDMT_releaseAllJobResources(zcs); return ERROR(memory_allocation); } zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize; memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled); zcs->dictSize = newDictSize; } else { /* if (endFrame==1) */ zcs->inBuff.buffer = g_nullBuffer; zcs->inBuff.filled = 0; zcs->dictSize = 0; zcs->frameEnded = 1; if (zcs->nextJobID == 0) { /* single chunk exception : checksum is calculated directly within worker thread */ zcs->params.fParams.checksumFlag = 0; } } DEBUGLOG(4, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask); POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]); /* this call is blocking when thread worker pool is exhausted */ zcs->nextJobID++; return 0; } /* ZSTDMT_flushNextJob() : * output : will be updated with amount of data flushed . 
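 * note : jobs are flushed strictly in submission order (doneJobID first),
 * so compressed sections always appear in the same order as their input .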
* blockToFlush : if >0, the function will block and wait if there is no data available to flush . * @return : amount of data remaining within internal buffer, 1 if unknown but > 0, 0 if no more, or an error code */ static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush) { unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask; if (zcs->doneJobID == zcs->nextJobID) return 0; /* all flushed ! */ - PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + ZSTD_PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); while (zcs->jobs[wJobID].jobCompleted==0) { DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID); - if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */ - pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */ + if (!blockToFlush) { ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */ + ZSTD_pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */ } - pthread_mutex_unlock(&zcs->jobCompleted_mutex); + ZSTD_pthread_mutex_unlock(&zcs->jobCompleted_mutex); /* compression job completed : output can be flushed */ { ZSTDMT_jobDescription job = zcs->jobs[wJobID]; if (!job.jobScanned) { if (ZSTD_isError(job.cSize)) { DEBUGLOG(5, "compression error detected "); ZSTDMT_waitForAllJobsCompleted(zcs); ZSTDMT_releaseAllJobResources(zcs); return job.cSize; } DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag); if (zcs->params.fParams.checksumFlag) { if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */ U32 const checksum = (U32)XXH64_digest(&zcs->xxhState); DEBUGLOG(5, "writing checksum : %08X \n", checksum); MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum); job.cSize += 4; zcs->jobs[wJobID].cSize += 4; } } zcs->jobs[wJobID].jobScanned = 1; } { size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos); DEBUGLOG(5, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID); memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite); output->pos += toWrite; job.dstFlushed += toWrite; } if (job.dstFlushed == job.cSize) { /* output buffer fully flushed => move to next one */ ZSTDMT_releaseBuffer(zcs->bufPool, job.dstBuff); zcs->jobs[wJobID].dstBuff = g_nullBuffer; zcs->jobs[wJobID].jobCompleted = 0; zcs->doneJobID++; } else { zcs->jobs[wJobID].dstFlushed = job.dstFlushed; } /* return value : how many bytes left in buffer ; fake it to 1 if unknown but >0 */ if (job.cSize > job.dstFlushed) return (job.cSize - job.dstFlushed); if (zcs->doneJobID < zcs->nextJobID) return 1; /* still some buffer to flush */ zcs->allJobsCompleted = zcs->frameEnded; /* frame completed and entirely flushed */ return 0; /* everything flushed */ } } /** ZSTDMT_compressStream_generic() : - * internal use only + * internal use only - exposed to be invoked from zstd_compress.c * assumption : output and input are valid (pos <= size) * @return : minimum amount of data remaining to flush, 0 if none */ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp) { size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize; + unsigned forwardInputProgress = 0; assert(output->pos <= output->size); 
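    /* overall strategy, as implemented below :
     * 1) copy as much input as possible into the internal input buffer ;
     * 2) once enough data is buffered (newJobThreshold) and a job slot is free,
     *    post a new compression job ;
     * 3) flush any completed job, blocking only when no forward progress
     *    could be made on the input side */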
assert(input->pos <= input->size); if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { - /* current frame being ended. Only flush/end are allowed. Or start new frame with init */ + /* current frame being ended. Only flush/end are allowed */ return ERROR(stage_wrong); } - if (mtctx->nbThreads==1) { /* delegate to single-thread (synchronous) */ + if (mtctx->params.nbThreads==1) { /* delegate to single-thread (synchronous) */ return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp); } - /* single-pass shortcut (note : this is synchronous-mode) */ - if ( (mtctx->nextJobID==0) /* just started */ - && (mtctx->inBuff.filled==0) /* nothing buffered */ - && (endOp==ZSTD_e_end) /* end order */ + /* single-pass shortcut (note : synchronous-mode) */ + if ( (mtctx->nextJobID == 0) /* just started */ + && (mtctx->inBuff.filled == 0) /* nothing buffered */ + && (endOp == ZSTD_e_end) /* end order */ && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough room */ - size_t const cSize = ZSTDMT_compress_advanced(mtctx, + size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx, (char*)output->dst + output->pos, output->size - output->pos, (const char*)input->src + input->pos, input->size - input->pos, - mtctx->cdict, mtctx->params, mtctx->overlapLog); + mtctx->cdict, mtctx->params); if (ZSTD_isError(cSize)) return cSize; input->pos = input->size; output->pos += cSize; ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer); /* was allocated in initStream */ mtctx->allJobsCompleted = 1; mtctx->frameEnded = 1; return 0; } /* fill input buffer */ if (input->size > input->pos) { /* support NULL input */ if (mtctx->inBuff.buffer.start == NULL) { - mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool); - if (mtctx->inBuff.buffer.start == NULL) return ERROR(memory_allocation); + mtctx->inBuff.buffer = ZSTDMT_getBuffer(mtctx->bufPool); /* note : may fail, in which case, no forward input progress */ mtctx->inBuff.filled = 0; } - { size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled); + if (mtctx->inBuff.buffer.start) { + size_t const toLoad = MIN(input->size - input->pos, mtctx->inBuffSize - mtctx->inBuff.filled); DEBUGLOG(5, "inBuff:%08X; inBuffSize=%u; ToCopy=%u", (U32)(size_t)mtctx->inBuff.buffer.start, (U32)mtctx->inBuffSize, (U32)toLoad); memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad); input->pos += toLoad; mtctx->inBuff.filled += toLoad; + forwardInputProgress = toLoad>0; } } if ( (mtctx->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */ && (mtctx->nextJobID <= mtctx->doneJobID + mtctx->jobIDMask) ) { /* avoid overwriting job round buffer */ CHECK_F( ZSTDMT_createCompressionJob(mtctx, mtctx->targetSectionSize, 0 /* endFrame */) ); } /* check for potential compressed data ready to be flushed */ - CHECK_F( ZSTDMT_flushNextJob(mtctx, output, (mtctx->inBuff.filled == mtctx->inBuffSize) /* blockToFlush */) ); /* block if it wasn't possible to create new job due to saturation */ + CHECK_F( ZSTDMT_flushNextJob(mtctx, output, !forwardInputProgress /* blockToFlush */) ); /* block if there was no forward input progress */ if (input->pos < input->size) /* input not consumed : do not flush yet */ endOp = ZSTD_e_continue; switch(endOp) { case ZSTD_e_flush: return ZSTDMT_flushStream(mtctx, output); case ZSTD_e_end: return ZSTDMT_endStream(mtctx, output); case ZSTD_e_continue: return 1; default: return ERROR(GENERIC); /* 
invalid endDirective */ } } size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { CHECK_F( ZSTDMT_compressStream_generic(zcs, output, input, ZSTD_e_continue) ); /* recommended next input size : fill current input buffer */ return zcs->inBuffSize - zcs->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ } static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned endFrame) { size_t const srcSize = zcs->inBuff.filled - zcs->dictSize; if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded)) && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) ); } /* check if there is any data available to flush */ return ZSTDMT_flushNextJob(zcs, output, 1 /* blockToFlush */); } size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) { DEBUGLOG(5, "ZSTDMT_flushStream"); - if (zcs->nbThreads==1) + if (zcs->params.nbThreads==1) return ZSTD_flushStream(zcs->cctxPool->cctx[0], output); return ZSTDMT_flushStream_internal(zcs, output, 0 /* endFrame */); } size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) { DEBUGLOG(4, "ZSTDMT_endStream"); - if (zcs->nbThreads==1) + if (zcs->params.nbThreads==1) return ZSTD_endStream(zcs->cctxPool->cctx[0], output); return ZSTDMT_flushStream_internal(zcs, output, 1 /* endFrame */); } Index: vendor/zstd/dist/lib/compress/zstdmt_compress.h =================================================================== --- vendor/zstd/dist/lib/compress/zstdmt_compress.h (revision 325596) +++ vendor/zstd/dist/lib/compress/zstdmt_compress.h (revision 325597) @@ -1,115 +1,132 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTDMT_COMPRESS_H #define ZSTDMT_COMPRESS_H #if defined (__cplusplus) extern "C" { #endif /* Note : This is an internal API. * Some methods are still exposed (ZSTDLIB_API), * because it used to be the only way to invoke MT compression. * Now, it's recommended to use ZSTD_compress_generic() instead. 
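 *
 * For illustration only, a minimal streaming loop with this API
 * (bufIn/bufOut/writeOut() are placeholder names, error checks omitted) :
 *
 *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   (4 worker threads)
 *     ZSTDMT_initCStream(mtctx, 3);                      (compression level 3)
 *     {   ZSTD_inBuffer in = { bufIn, inSize, 0 };
 *         while (in.pos < in.size) {
 *             ZSTD_outBuffer out = { bufOut, outCapacity, 0 };
 *             ZSTDMT_compressStream(mtctx, &out, &in);
 *             writeOut(out.dst, out.pos);
 *     }   }
 *     for (;;) {
 *         ZSTD_outBuffer out = { bufOut, outCapacity, 0 };
 *         size_t const remaining = ZSTDMT_endStream(mtctx, &out);
 *         writeOut(out.dst, out.pos);
 *         if (remaining == 0) break;
 *     }
 *     ZSTDMT_freeCCtx(mtctx);
 *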
* These methods will stop being exposed in a future version */ /* === Dependencies === */ #include <stddef.h> /* size_t */ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ #include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ /* === Memory management === */ typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads); ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbThreads, ZSTD_customMem cMem); ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); /* === Simple buffer-to-buffer one-pass function === */ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel); /* === Streaming functions === */ ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */ ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ /* === Advanced functions and parameters === */ #ifndef ZSTDMT_SECTION_SIZE_MIN # define ZSTDMT_SECTION_SIZE_MIN (1U << 20) /* 1 MB - Minimum size of each compression job */ #endif ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, ZSTD_parameters const params, unsigned overlapLog); ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */ ZSTD_parameters params, unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fparams, unsigned long long pledgedSrcSize); /* note : zero means empty */ -/* ZSDTMT_parameter : +/* ZSTDMT_parameter : * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ typedef enum { ZSTDMT_p_sectionSize, /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */ ZSTDMT_p_overlapSectionLog /* Log of overlapped section; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */ -} ZSDTMT_parameter; +} ZSTDMT_parameter; /* ZSTDMT_setMTCtxParameter() : * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. * The function must be called typically after ZSTD_createCCtx(). * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ -ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value); +ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value); /*!
ZSTDMT_compressStream_generic() : * Combines ZSTDMT_compressStream() with ZSTDMT_flushStream() or ZSTDMT_endStream() * depending on flush directive. * @return : minimum amount of data still to be flushed * 0 if fully flushed * or an error code */ ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, ZSTD_EndDirective endOp); + +/* === Private definitions; never ever use directly === */ + +size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value); + +size_t ZSTDMT_initializeCCtxParameters(ZSTD_CCtx_params* params, unsigned nbThreads); + +/*! ZSTDMT_initCStream_internal() : + * Private use only. Init streaming operation. + * expects params to be valid. + * must receive dict, or cdict, or none, but not both. + * @return : 0, or an error code */ +size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); #if defined (__cplusplus) } #endif #endif /* ZSTDMT_COMPRESS_H */ Index: vendor/zstd/dist/lib/decompress/zstd_decompress.c =================================================================== --- vendor/zstd/dist/lib/decompress/zstd_decompress.c (revision 325596) +++ vendor/zstd/dist/lib/decompress/zstd_decompress.c (revision 325597) @@ -1,2478 +1,2655 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * HEAPMODE : * Select how default decompression function ZSTD_decompress() will allocate memory, * in memory stack (0), or in memory heap (1, requires malloc()) */ #ifndef ZSTD_HEAPMODE # define ZSTD_HEAPMODE 1 #endif /*! * LEGACY_SUPPORT : * if set to 1, ZSTD_decompress() can decode older formats (v0.1+) */ #ifndef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 0 #endif /*! * MAXWINDOWSIZE_DEFAULT : * maximum window size accepted by DStream, by default. * Frames requiring more memory will be rejected. 
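 * Note : the guard below is an #ifndef, so this default can also be raised
 * at build time by defining ZSTD_MAXWINDOWSIZE_DEFAULT on the compiler
 * command line (illustrative ; any constant expression of the same form works).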
*/ #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT -# define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ +# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1) #endif /*-******************************************************* * Dependencies *********************************************************/ #include <string.h> /* memcpy, memmove, memset */ #include "mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" #include "zstd_internal.h" #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) # include "zstd_legacy.h" #endif /*-************************************* * Errors ***************************************/ #define ZSTD_isError ERR_isError /* for inlining */ #define FSE_isError ERR_isError #define HUF_isError ERR_isError /*_******************************************************* * Memory operations **********************************************************/ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } /*-************************************************************* * Context management ***************************************************************/ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; typedef enum { zdss_init=0, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; typedef struct { FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; U32 rep[ZSTD_REP_NUM]; } ZSTD_entropyDTables_t; struct ZSTD_DCtx_s { const FSE_DTable* LLTptr; const FSE_DTable* MLTptr; const FSE_DTable* OFTptr; const HUF_DTable* HUFptr; ZSTD_entropyDTables_t entropy; const void* previousDstEnd; /* detect continuity */ const void* base; /* start of current segment */ const void* vBase; /* virtual start of previous segment if it was just before current one */ const void* dictEnd; /* end of previous segment */ size_t expected; ZSTD_frameHeader fParams; - blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ + U64 decodedSize; + blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ ZSTD_dStage stage; U32 litEntropy; U32 fseEntropy; XXH64_state_t xxhState; size_t headerSize; U32 dictID; + ZSTD_format_e format; const BYTE* litPtr; ZSTD_customMem customMem; size_t litSize; size_t rleSize; size_t staticSize; /* streaming */ ZSTD_DDict* ddictLocal; const ZSTD_DDict* ddict; ZSTD_dStreamStage streamStage; char* inBuff; size_t inBuffSize; size_t inPos; size_t maxWindowSize; char* outBuff; size_t outBuffSize; size_t outStart; size_t outEnd; - size_t blockSize; size_t lhSize; void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; U32 hostageByte; /* workspace */ BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support sizeof NULL */ return sizeof(*dctx) +
ZSTD_sizeof_DDict(dctx->ddictLocal) + dctx->inBuffSize + dctx->outBuffSize; } size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } -size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) + +static size_t ZSTD_startingInputLength(ZSTD_format_e format) { - dctx->expected = ZSTD_frameHeaderSize_prefix; - dctx->stage = ZSTDds_getFrameHeaderSize; - dctx->previousDstEnd = NULL; - dctx->base = NULL; - dctx->vBase = NULL; - dctx->dictEnd = NULL; - dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ - dctx->litEntropy = dctx->fseEntropy = 0; - dctx->dictID = 0; - MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); - memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ - dctx->LLTptr = dctx->entropy.LLTable; - dctx->MLTptr = dctx->entropy.MLTable; - dctx->OFTptr = dctx->entropy.OFTable; - dctx->HUFptr = dctx->entropy.hufTable; - return 0; + size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ? + ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize : + ZSTD_frameHeaderSize_prefix; + ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE); + /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ + assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); + return startingInputLength; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) { - ZSTD_decompressBegin(dctx); /* cannot fail */ - dctx->staticSize = 0; + dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */ + dctx->staticSize = 0; dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; - dctx->ddict = NULL; - dctx->ddictLocal = NULL; - dctx->inBuff = NULL; - dctx->inBuffSize = 0; - dctx->outBuffSize= 0; + dctx->ddict = NULL; + dctx->ddictLocal = NULL; + dctx->inBuff = NULL; + dctx->inBuffSize = 0; + dctx->outBuffSize = 0; dctx->streamStage = zdss_init; } +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) +{ + ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; + + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ + + ZSTD_initDCtx_internal(dctx); + dctx->staticSize = workspaceSize; + dctx->inBuff = (char*)(dctx+1); + return dctx; +} + ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) { if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem); if (!dctx) return NULL; dctx->customMem = customMem; dctx->legacyContext = NULL; dctx->previousLegacyVersion = 0; ZSTD_initDCtx_internal(dctx); return dctx; } } -ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) -{ - ZSTD_DCtx* dctx = (ZSTD_DCtx*) workspace; - - if ((size_t)workspace & 7) return NULL; /* 8-aligned */ - if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ - - ZSTD_initDCtx_internal(dctx); - dctx->staticSize = workspaceSize; - dctx->inBuff = (char*)(dctx+1); - return dctx; -} - ZSTD_DCtx* ZSTD_createDCtx(void) { return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); } size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support free on NULL */ if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */ { ZSTD_customMem const cMem = dctx->customMem; ZSTD_freeDDict(dctx->ddictLocal); dctx->ddictLocal = NULL; ZSTD_free(dctx->inBuff, cMem); dctx->inBuff = NULL; #if defined(ZSTD_LEGACY_SUPPORT) && 
(ZSTD_LEGACY_SUPPORT >= 1) if (dctx->legacyContext) ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); #endif ZSTD_free(dctx, cMem); return 0; } } /* no longer useful */ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) { size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ } /*-************************************************************* * Decompression section ***************************************************************/ /*! ZSTD_isFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ unsigned ZSTD_isFrame(const void* buffer, size_t size) { - if (size < 4) return 0; + if (size < ZSTD_frameIdSize) return 0; { U32 const magic = MEM_readLE32(buffer); if (magic == ZSTD_MAGICNUMBER) return 1; if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; } #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(buffer, size)) return 1; #endif return 0; } - -/** ZSTD_frameHeaderSize() : -* srcSize must be >= ZSTD_frameHeaderSize_prefix. -* @return : size of the Frame Header */ -size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +/** ZSTD_frameHeaderSize_internal() : + * srcSize must be large enough to reach header size fields. + * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless + * @return : size of the Frame Header + * or an error code, which can be tested with ZSTD_isError() */ +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) { - if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); - { BYTE const fhd = ((const BYTE*)src)[4]; + size_t const minInputSize = ZSTD_startingInputLength(format); + if (srcSize < minInputSize) return ERROR(srcSize_wrong); + + { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; U32 const dictID= fhd & 3; U32 const singleSegment = (fhd >> 5) & 1; U32 const fcsId = fhd >> 6; - return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] - + (singleSegment && !fcsId); + return minInputSize + !singleSegment + + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + + (singleSegment && !fcsId); } } +/** ZSTD_frameHeaderSize() : + * srcSize must be >= ZSTD_frameHeaderSize_prefix. + * @return : size of the Frame Header */ +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); +} -/** ZSTD_getFrameHeader() : -* decode Frame Header, or require larger `srcSize`. -* @return : 0, `zfhPtr` is correctly filled, -* >0, `srcSize` is too small, result is expected `srcSize`, -* or an error code, which can be tested using ZSTD_isError() */ -size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) + +/** ZSTD_getFrameHeader_internal() : + * decode Frame Header, or require larger `srcSize`. 
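+ * (in the magicless variant, the 4-byte magic number is absent,
+ * so the header starts directly at the frame header descriptor byte,
+ * which is why the minimum input length is reduced by ZSTD_frameIdSize)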
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; - if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix; + size_t const minInputSize = ZSTD_startingInputLength(format); - if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) { + if (srcSize < minInputSize) return minInputSize; + + if ( (format != ZSTD_f_zstd1_magicless) + && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + frame length */ memset(zfhPtr, 0, sizeof(*zfhPtr)); - zfhPtr->frameContentSize = MEM_readLE32((const char *)src + 4); + zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize); zfhPtr->frameType = ZSTD_skippableFrame; - zfhPtr->windowSize = 0; return 0; } return ERROR(prefix_unknown); } /* ensure there is enough `srcSize` to fully read/decode frame header */ - { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); + { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); if (srcSize < fhsize) return fhsize; zfhPtr->headerSize = (U32)fhsize; } - { BYTE const fhdByte = ip[4]; - size_t pos = 5; + { BYTE const fhdByte = ip[minInputSize-1]; + size_t pos = minInputSize; U32 const dictIDSizeCode = fhdByte&3; U32 const checksumFlag = (fhdByte>>2)&1; U32 const singleSegment = (fhdByte>>5)&1; U32 const fcsID = fhdByte>>6; U64 windowSize = 0; U32 dictID = 0; U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */ if (!singleSegment) { BYTE const wlByte = ip[pos++]; U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; if (windowLog > ZSTD_WINDOWLOG_MAX) return ERROR(frameParameter_windowTooLarge); windowSize = (1ULL << windowLog); windowSize += (windowSize >> 3) * (wlByte&7); } switch(dictIDSizeCode) { default: assert(0); /* impossible */ case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; } switch(fcsID) { default: assert(0); /* impossible */ case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; case 3 : frameContentSize = MEM_readLE64(ip+pos); break; } if (singleSegment) windowSize = frameContentSize; zfhPtr->frameType = ZSTD_frame; zfhPtr->frameContentSize = frameContentSize; zfhPtr->windowSize = windowSize; + zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); zfhPtr->dictID = dictID; zfhPtr->checksumFlag = checksumFlag; } return 0; } +/** ZSTD_getFrameHeader() : + * decode Frame Header, or require larger `srcSize`. + * note : this function does not consume input, it only reads it. 
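+ * typical use, for illustration (buf/filled/use() are placeholder names) :
+ *     ZSTD_frameHeader zfh;
+ *     size_t const hint = ZSTD_getFrameHeader(&zfh, buf, filled);
+ *     if (ZSTD_isError(hint)) handle the error ;
+ *     else if (hint > 0) gather at least hint bytes, then retry ;
+ *     else use(zfh.frameContentSize, zfh.windowSize);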
+ * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) +{ + return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1); +} + + /** ZSTD_getFrameContentSize() : * compatible with legacy mode * @return : decompressed size of the single frame pointed to be `src` if known, otherwise * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) { unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; } #endif { ZSTD_frameHeader zfh; if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; if (zfh.frameType == ZSTD_skippableFrame) { return 0; } else { return zfh.frameContentSize; } } } /** ZSTD_findDecompressedSize() : * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; while (srcSize >= ZSTD_frameHeaderSize_prefix) { - const U32 magicNumber = MEM_readLE32(src); + U32 const magicNumber = MEM_readLE32(src); if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { size_t skippableSize; if (srcSize < ZSTD_skippableHeaderSize) return ERROR(srcSize_wrong); - skippableSize = MEM_readLE32((const BYTE *)src + 4) + - ZSTD_skippableHeaderSize; + skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize) + + ZSTD_skippableHeaderSize; if (srcSize < skippableSize) { return ZSTD_CONTENTSIZE_ERROR; } src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; /* check for overflow */ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; totalDstSize += ret; } { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); if (ZSTD_isError(frameSrcSize)) { return ZSTD_CONTENTSIZE_ERROR; } src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; } - } + } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ - if (srcSize) { - return ZSTD_CONTENTSIZE_ERROR; - } + if (srcSize) return ZSTD_CONTENTSIZE_ERROR; return totalDstSize; } /** ZSTD_getDecompressedSize() : * compatible with legacy mode * @return : decompressed size if known, 0 otherwise note : 0 can mean any of the following : - frame content is empty - decompressed size field is not present in frame header - frame header unknown / not supported - frame header not complete (`srcSize` too small) */ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); - return ret >= ZSTD_CONTENTSIZE_ERROR ? 0 : ret; + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN); + return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; } /** ZSTD_decodeFrameHeader() : * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). 
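 * note : also checks that the frame's dictID matches the dictionary loaded
 * in the DCtx (error dictionary_wrong otherwise), and resets the xxh64
 * checksum state whenever the frame announces a checksum .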
* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) { - size_t const result = ZSTD_getFrameHeader(&(dctx->fParams), src, headerSize); - if (ZSTD_isError(result)) return result; /* invalid header */ - if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ + size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format); + if (ZSTD_isError(result)) return result; /* invalid header */ + if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong); if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); return 0; } /*! ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); { U32 const cBlockHeader = MEM_readLE24(src); U32 const cSize = cBlockHeader >> 3; bpPtr->lastBlock = cBlockHeader & 1; bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); bpPtr->origSize = cSize; /* only useful for RLE */ if (bpPtr->blockType == bt_rle) return 1; if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected); return cSize; } } static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); memcpy(dst, src, srcSize); return srcSize; } static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, size_t regenSize) { if (srcSize != 1) return ERROR(srcSize_wrong); if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall); memset(dst, *(const BYTE*)src, regenSize); return regenSize; } /*! ZSTD_decodeLiteralsBlock() : - @return : nb of bytes read from src (< srcSize ) */ + * @return : nb of bytes read from src (< srcSize ) + * note : symbol not declared but exposed for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ { if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); { const BYTE* const istart = (const BYTE*) src; symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); switch(litEncType) { case set_repeat: if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); /* fall-through */ case set_compressed: if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ /* 2 - 2 - 10 - 10 */ singleStream = !lhlCode; lhSize = 3; litSize = (lhc >> 4) & 0x3FF; litCSize = (lhc >> 14) & 0x3FF; break; case 2: /* 2 - 2 - 14 - 14 */ lhSize = 4; litSize = (lhc >> 4) & 0x3FFF; litCSize = lhc >> 18; break; case 3: /* 2 - 2 - 18 - 18 */ lhSize = 5; litSize = (lhc >> 4) & 0x3FFFF; litCSize = (lhc >> 22) + (istart[4] << 10); break; } if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected); if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); if (HUF_isError((litEncType==set_repeat) ? ( singleStream ? 
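/* Worked instance of the literals-header layouts decoded just above
 * (illustrative bytes, not from this patch) : for lhlCode==0 the 32-bit
 * little-endian read packs 2-2-10-10 bits as [type|sizeFormat|litSize|litCSize].
 * Header bytes 42 06 0F give lhc = 0x0F0642, so type = lhc & 3 = 2
 * (set_compressed), lhlCode = (0x42 >> 2) & 3 = 0 (single stream, lhSize = 3),
 * litSize = (lhc >> 4) & 0x3FF = 100, litCSize = (lhc >> 14) & 0x3FF = 60. */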
HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) : ( singleStream ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->entropy.workspace, sizeof(dctx->entropy.workspace)) : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->entropy.workspace, sizeof(dctx->entropy.workspace))))) return ERROR(corruption_detected); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; break; } if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ if (litSize+lhSize > srcSize) return ERROR(corruption_detected); memcpy(dctx->litBuffer, istart+lhSize, litSize); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ lhSize = 1; litSize = istart[0] >> 3; break; case 1: lhSize = 2; litSize = MEM_readLE16(istart) >> 4; break; case 3: lhSize = 3; litSize = MEM_readLE24(istart) >> 4; if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ break; } if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected); memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; } default: return ERROR(corruption_detected); /* impossible */ } } } typedef union { FSE_decode_t realData; U32 alignedBy4; } FSE_decode_t4; /* Default FSE distribution table for Literal Lengths */ static const FSE_decode_t4 LL_defaultDTable[(1< max) return ERROR(corruption_detected); FSE_buildDTable_rle(DTableSpace, *(const BYTE*)src); *DTablePtr = DTableSpace; return 1; case set_basic : *DTablePtr = (const FSE_DTable*)tmpPtr; return 0; case set_repeat: if (!flagRepeatTable) return ERROR(corruption_detected); return 0; default : /* impossible */ case set_compressed : { U32 tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); if (FSE_isError(headerSize)) return ERROR(corruption_detected); if (tableLog > maxLog) return ERROR(corruption_detected); FSE_buildDTable(DTableSpace, norm, max, tableLog); *DTablePtr = DTableSpace; return headerSize; } } } size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* const iend = istart + srcSize; const BYTE* ip = istart; DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); /* check */ if 
(srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); /* SeqHead */ { int nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { if (ip+2 > iend) return ERROR(srcSize_wrong); nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; } else { if (ip >= iend) return ERROR(srcSize_wrong); nbSeq = ((nbSeq-0x80)<<8) + *ip++; } } *nbSeqPtr = nbSeq; } /* FSE table descriptors */ if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); ip++; /* Build DTables */ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); ip += llhSize; } { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); ip += ofhSize; } { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); ip += mlhSize; } } return ip-istart; } typedef struct { size_t litLength; size_t matchLength; size_t offset; const BYTE* match; } seq_t; typedef struct { BIT_DStream_t DStream; FSE_DState_t stateLL; FSE_DState_t stateOffb; FSE_DState_t stateML; size_t prevOffset[ZSTD_REP_NUM]; const BYTE* base; size_t pos; uPtrDiff gotoDict; } seqState_t; FORCE_NOINLINE size_t ZSTD_execSequenceLast7(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */ /* copy literals */ if (op < oend_w) { ZSTD_wildcopy(op, *litPtr, oend_w - op); *litPtr += oend_w - op; op = oend_w; } while (op < oLitEnd) *op++ = *(*litPtr)++; /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd - (base-match); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; } } while (op < oMatchEnd) *op++ = *match++; return sequenceLength; } -static seq_t ZSTD_decodeSequence(seqState_t* seqState) +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; + +/* We 
need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) + * bits before reloading. This value is the maximum number of bits we read + * after reloading when we are decoding long offsets. + */ +#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ + (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ + ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ + : 0) + +static seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) { seq_t seq; U32 const llCode = FSE_peekSymbol(&seqState->stateLL); U32 const mlCode = FSE_peekSymbol(&seqState->stateML); - U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= MaxOff, by table construction */ U32 const llBits = LL_bits[llCode]; U32 const mlBits = ML_bits[mlCode]; U32 const ofBits = ofCode; U32 const totalBits = llBits+mlBits+ofBits; static const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; static const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, - 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; /* sequence */ { size_t offset; if (!ofCode) offset = 0; else { - offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets) { + U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1); + offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } else { + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } } if (ofCode <= 1) { offset += (llCode==0); if (offset) { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } else { offset = seqState->prevOffset[0]; } } else { seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } seq.offset = offset; } seq.matchLength = ML_base[mlCode] + ((mlCode>31) ?
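/* Concrete instance of the split offset read above (illustrative, assuming
 * STREAM_ACCUMULATOR_MIN_32 == 25, which is what makes the
 * LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5 assert hold) : with ofBits == 29,
 * extraBits = 29 - (25-1) = 5, so the decoder reads the 24 high bits,
 * reloads the accumulator, then reads the 5 low bits -- never asking the
 * bitstream for more than 24 bits in a single call. */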
BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ - if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Verify that there is enough bits to read the rest of the data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ - if ( MEM_32bits() - || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) - BIT_reloadDStream(&seqState->DStream); + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); /* ANS state update */ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ return seq; } HINT_INLINE size_t ZSTD_execSequence(BYTE* op, BYTE* const oend, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = oLitEnd - sequence.offset; /* check */ if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); /* copy Literals */ ZSTD_copy8(op, *litPtr); if (sequence.litLength > 8) ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* copy Match */ if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix -> go into extDict */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); match = dictEnd + (match - base); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_w || sequence.matchLength < MINMATCH) { U32 i; for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; return sequenceLength; } } } /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = 
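/* The dec32table/dec64table pair in this close-range branch handles
 * overlapping matches (offset < 8). E.g. with offset == 1 (a run of one
 * repeated byte) : the four scalar copies replicate the byte, dec32table[1]
 * == 1 advances `match` so ZSTD_copy4 extends the run, and dec64table[1]
 * == 8 then rewinds `match` to sit exactly 8 bytes behind `op`, so the
 * later 8-byte wildcopies only read bytes that were already written. */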
dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= sub2; } else { ZSTD_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_w) { ZSTD_wildcopy(op, match, oend_w - op); match += oend_w - op; op = oend_w; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } return sequenceLength; } static size_t ZSTD_decompressSequences( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, - const void* seqStart, size_t seqSize) + const void* seqStart, size_t seqSize, + const ZSTD_longOffset_e isLongOffset) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); int nbSeq; DEBUGLOG(5, "ZSTD_decompressSequences"); /* Build Decoding Tables */ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); DEBUGLOG(5, "ZSTD_decodeSeqHeaders: size=%u, nbSeq=%i", (U32)seqHSize, nbSeq); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; } /* Regen sequences */ if (nbSeq) { seqState_t seqState; dctx->fseEntropy = 1; { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { nbSeq--; - { seq_t const sequence = ZSTD_decodeSequence(&seqState); + { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } } /* check if reached exact end */ DEBUGLOG(5, "after decode loop, remaining nbSeq : %i", nbSeq); if (nbSeq) return ERROR(corruption_detected); /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); memcpy(op, litPtr, lastLLSize); op += lastLLSize; } return op-ostart; } -FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets) +HINT_INLINE +seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets) { seq_t seq; U32 const llCode = FSE_peekSymbol(&seqState->stateLL); U32 const mlCode = FSE_peekSymbol(&seqState->stateML); - U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= MaxOff, by table construction */ U32 const llBits = LL_bits[llCode]; U32 const mlBits = ML_bits[mlCode]; U32 const ofBits = ofCode; U32 const totalBits = llBits+mlBits+ofBits; static const U32 LL_base[MaxLL+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000 }; static const U32 ML_base[MaxML+1] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; static const U32 OF_base[MaxOff+1] = { 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, - 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; /* sequence */ { size_t offset; if (!ofCode) offset = 0; else { - if (longOffsets) { - int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets) { + U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1); offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); } else { offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); } } if (ofCode <= 1) { offset += (llCode==0); if (offset) { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset = temp; } else { offset = seqState->prevOffset[0]; } } else { seqState->prevOffset[2] = seqState->prevOffset[1]; seqState->prevOffset[1] = seqState->prevOffset[0]; seqState->prevOffset[0] = offset; } seq.offset = offset; } seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ - if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Verify that there is enough bits to read the rest of the data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); seq.litLength = LL_base[llCode] + ((llCode>15) ? 
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ - if (MEM_32bits() || - (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); { size_t const pos = seqState->pos + seq.litLength; seq.match = seqState->base + pos - seq.offset; /* single memory segment */ if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */ seqState->pos = pos + seq.matchLength; } /* ANS state update */ FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ return seq; } -static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) { - if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { - return ZSTD_decodeSequenceLong_generic(seqState, 1); - } else { - return ZSTD_decodeSequenceLong_generic(seqState, 0); - } -} HINT_INLINE size_t ZSTD_execSequenceLong(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; const BYTE* const iLitEnd = *litPtr + sequence.litLength; const BYTE* match = sequence.match; /* check */ -#if 1 if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); -#endif /* copy Literals */ ZSTD_copy8(op, *litPtr); if (sequence.litLength > 8) ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ op = oLitEnd; *litPtr = iLitEnd; /* update for next sequence */ /* copy Match */ -#if 1 if (sequence.offset > (size_t)(oLitEnd - base)) { /* offset beyond prefix */ if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); if (match + sequence.matchLength <= dictEnd) { memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; match = base; if (op > oend_w || sequence.matchLength < MINMATCH) { U32 i; for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; return sequenceLength; } } } - /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ -#endif + assert(op <= oend_w); + assert(sequence.matchLength >= MINMATCH); /* match within prefix */ if (sequence.offset < 8) { /* close range match, overlap */ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ int const sub2 = 
dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= sub2; } else { ZSTD_copy8(op, match); } op += 8; match += 8; if (oMatchEnd > oend-(16-MINMATCH)) { if (op < oend_w) { ZSTD_wildcopy(op, match, oend_w - op); match += oend_w - op; op = oend_w; } while (op < oMatchEnd) *op++ = *match++; } else { ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ } return sequenceLength; } static size_t ZSTD_decompressSequencesLong( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, - const void* seqStart, size_t seqSize) + const void* seqStart, size_t seqSize, + const ZSTD_longOffset_e isLongOffset) { const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const base = (const BYTE*) (dctx->base); const BYTE* const vBase = (const BYTE*) (dctx->vBase); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - unsigned const windowSize32 = (unsigned)dctx->fParams.windowSize; int nbSeq; /* Build Decoding Tables */ { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; } /* Regen sequences */ if (nbSeq) { #define STORED_SEQS 4 #define STOSEQ_MASK (STORED_SEQS-1) #define ADVANCED_SEQS 4 seq_t sequences[STORED_SEQS]; int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); seqState_t seqState; int seqNb; dctx->fseEntropy = 1; { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } seqState.base = base; seqState.pos = (size_t)(op-base); seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); /* prepare in advance */ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) { - sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize32); + sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset); } if (seqNb<seqAdvance) return ERROR(corruption_detected); /* decode and decompress */ for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) { - seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize32); + seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset); size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; PREFETCH(sequence.match); sequences[seqNb&STOSEQ_MASK] = sequence; op += oneSeqSize; } if (seqNb<nbSeq) return ERROR(corruption_detected); /* finish queue */ seqNb -= seqAdvance; for ( ; seqNb<nbSeq ; seqNb++) { size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* save reps for next block */ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } } /* last literal segment */ { size_t const lastLLSize = litEnd - litPtr; if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); memcpy(op, litPtr, lastLLSize); op += lastLLSize; } return op-ostart; } static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize) + const void* src, size_t srcSize, const int frame) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; - DEBUGLOG(5, "ZSTD_decompressBlock_internal"); + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. + * We don't expect that to be the case in 64-bit mode. + * If we are in block mode we don't know the window size, so we have to be + * conservative. + */ + ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))); + /* windowSize could be any value at this point, since it is only validated * in the streaming API.
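 * For scale (illustrative reading, assuming STREAM_ACCUMULATOR_MIN is 25 on
 * 32-bit builds) : a 32-bit decoder only takes the long-offset path for
 * frames declaring windowSize > 32 MiB, while in block mode (frame==0) it
 * must always assume long offsets, precisely because of this missing
 * validation.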
+ */ + DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* Decode literals section */ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; } - if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ - /* likely because of register pressure */ - /* if that's the correct cause, then 32-bits ARM should be affected differently */ - /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ - if (dctx->fParams.windowSize > (1<<23)) - return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); + if (frame && dctx->fParams.windowSize > (1<<23)) + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, isLongOffset); + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, isLongOffset); } static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) { if (dst != dctx->previousDstEnd) { /* not contiguous */ dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dst; dctx->previousDstEnd = dst; } } size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t dSize; ZSTD_checkContinuity(dctx, dst); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } /** ZSTD_insertBlock() : insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
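 * A sketch of the intended pairing with ZSTD_decompressBlock(), under the
 * assumption that the caller transports raw blocks out of band
 * (blockIsCompressed, out, outCap, blk, blkSize are hypothetical caller-side
 * names) :
 *     if (blockIsCompressed)
 *         dSize = ZSTD_decompressBlock(dctx, out, outCap, blk, blkSize);
 *     else {
 *         memcpy(out, blk, blkSize);             // stored block, copied as-is
 *         ZSTD_insertBlock(dctx, out, blkSize);  // keep dctx history consistent
 *         dSize = blkSize;
 *     }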
*/ ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) { ZSTD_checkContinuity(dctx, blockStart); dctx->previousDstEnd = (const char*)blockStart + blockSize; return blockSize; } -size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) +static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) { if (length > dstCapacity) return ERROR(dstSize_tooSmall); memset(dst, byte, length); return length; } /** ZSTD_findFrameCompressedSize() : * compatible with legacy mode * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame * `srcSize` must be at least as large as the frame contained * @return : the compressed size of the frame starting at `src` */ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameCompressedSizeLegacy(src, srcSize); #endif if ( (srcSize >= ZSTD_skippableHeaderSize) && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) { - return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4); + return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize); } else { const BYTE* ip = (const BYTE*)src; const BYTE* const ipstart = ip; size_t remainingSize = srcSize; ZSTD_frameHeader zfh; /* Extract Frame Header */ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(ret)) return ret; if (ret > 0) return ERROR(srcSize_wrong); } ip += zfh.headerSize; remainingSize -= zfh.headerSize; /* Loop on each block */ while (1) { blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong); ip += ZSTD_blockHeaderSize + cBlockSize; remainingSize -= ZSTD_blockHeaderSize + cBlockSize; if (blockProperties.lastBlock) break; } if (zfh.checksumFlag) { /* Final frame content checksum */ if (remainingSize < 4) return ERROR(srcSize_wrong); ip += 4; remainingSize -= 4; } return ip - ipstart; } } /*! 
ZSTD_decompressFrame() : * @dctx must be properly initialized */ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void** srcPtr, size_t *srcSizePtr) { const BYTE* ip = (const BYTE*)(*srcPtr); BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t remainingSize = *srcSizePtr; /* check */ if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); /* Frame Header */ { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) ); ip += frameHeaderSize; remainingSize -= frameHeaderSize; } /* Loop on each block */ while (1) { size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); break; case bt_rle : decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); break; case bt_reserved : default: return ERROR(corruption_detected); } if (ZSTD_isError(decodedSize)) return decodedSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize); op += decodedSize; ip += cBlockSize; remainingSize -= cBlockSize; if (blockProperties.lastBlock) break; } + if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { + if ((U64)(op-ostart) != dctx->fParams.frameContentSize) { + return ERROR(corruption_detected); + } } if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); U32 checkRead; if (remainingSize<4) return ERROR(checksum_wrong); checkRead = MEM_readLE32(ip); if (checkRead != checkCalc) return ERROR(checksum_wrong); ip += 4; remainingSize -= 4; } /* Allow caller to get size read */ *srcPtr = ip; *srcSizePtr = remainingSize; return op-ostart; } static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict); static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict); static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, const ZSTD_DDict* ddict) { void* const dststart = dst; assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ if (ddict) { dict = ZSTD_DDictDictContent(ddict); dictSize = ZSTD_DDictDictSize(ddict); } while (srcSize >= ZSTD_frameHeaderSize_prefix) { U32 magicNumber; #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) if (ZSTD_isLegacy(src, srcSize)) { size_t decodedSize; size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); if (ZSTD_isError(frameSize)) return frameSize; /* legacy support is not compatible with static dctx */ if (dctx->staticSize) return ERROR(memory_allocation); decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); dst = (BYTE*)dst + 
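/* The frame-walking idiom of the surrounding loop can also be driven from
 * user code; a sketch over a concatenation at `p`/`len` (hypothetical names),
 * essentially re-deriving ZSTD_findDecompressedSize() above :
 *     unsigned long long total = 0;
 *     while (len > 0) {
 *         size_t const fSize = ZSTD_findFrameCompressedSize(p, len);
 *         if (ZSTD_isError(fSize)) break;            // malformed input
 *         {   unsigned long long const c = ZSTD_getFrameContentSize(p, len);
 *             if (c >= ZSTD_CONTENTSIZE_ERROR) break; // unknown size or error
 *             total += c;
 *         }
 *         p = (const char*)p + fSize; len -= fSize;
 *     }
 */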
decodedSize; dstCapacity -= decodedSize; src = (const BYTE*)src + frameSize; srcSize -= frameSize; continue; } #endif magicNumber = MEM_readLE32(src); + DEBUGLOG(4, "reading magic number %08X (expecting %08X)", + (U32)magicNumber, (U32)ZSTD_MAGICNUMBER); if (magicNumber != ZSTD_MAGICNUMBER) { if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { size_t skippableSize; if (srcSize < ZSTD_skippableHeaderSize) return ERROR(srcSize_wrong); - skippableSize = MEM_readLE32((const BYTE *)src + 4) + - ZSTD_skippableHeaderSize; + skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize) + + ZSTD_skippableHeaderSize; if (srcSize < skippableSize) return ERROR(srcSize_wrong); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } return ERROR(prefix_unknown); } if (ddict) { /* we were called from ZSTD_decompress_usingDDict */ CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict)); } else { /* this will initialize correctly with no dict if dict == NULL, so * use this in all cases but ddict */ CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); } ZSTD_checkContinuity(dctx, dst); { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); if (ZSTD_isError(res)) return res; /* no need to bound check, ZSTD_decompressFrame already has */ dst = (BYTE*)dst + res; dstCapacity -= res; } } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */ return (BYTE*)dst - (BYTE*)dststart; } size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); } size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); } size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; ZSTD_DCtx* const dctx = ZSTD_createDCtx(); if (dctx==NULL) return ERROR(memory_allocation); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTD_DCtx dctx; return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /*-************************************** * Advanced Streaming Decompression API * Bufferless and synchronous ****************************************/ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { switch(dctx->stage) { default: /* should not happen */ assert(0); case ZSTDds_getFrameHeaderSize: case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; case ZSTDds_decompressBlock: return ZSTDnit_block; case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } } static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /** ZSTD_decompressContinue() : * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) * or an error code, which can be tested using ZSTD_isError() */ 
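The contract documented above (feed exactly the number of bytes announced by ZSTD_nextSrcSizeToDecompress(), in stages) is easiest to see from the caller's side. A minimal sketch of the bufferless loop, assuming a caller-managed readExact() plus inBuf/outBuf/outEnd (all hypothetical names) :

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    unsigned char* op = outBuf;
    ZSTD_decompressBegin(dctx);                       /* cannot fail, per note later in this patch */
    {   size_t need = ZSTD_nextSrcSizeToDecompress(dctx);
        while (need != 0) {                           /* 0 means the frame is complete */
            readExact(inBuf, need);                   /* exactly `need` bytes, no more, no less */
            {   size_t const got = ZSTD_decompressContinue(dctx, op, (size_t)(outEnd-op), inBuf, need);
                if (ZSTD_isError(got)) break;         /* decoding error */
                op += got;                            /* often 0 : header/checksum steps produce nothing */
            }
            need = ZSTD_nextSrcSizeToDecompress(dctx);
        }
    }
    ZSTD_freeDCtx(dctx);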
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_decompressContinue"); /* Sanity check */ - if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* unauthorized */ + if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */ if (dstCapacity) ZSTD_checkContinuity(dctx, dst); switch (dctx->stage) { case ZSTDds_getFrameHeaderSize : - if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); /* unauthorized */ assert(src != NULL); - if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); - dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ - dctx->stage = ZSTDds_decodeSkippableHeader; - return 0; - } - dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); + if (dctx->format == ZSTD_f_zstd1) { /* allows header */ + assert(srcSize >= ZSTD_frameIdSize); /* to read skippable magic number */ + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */ + dctx->stage = ZSTDds_decodeSkippableHeader; + return 0; + } } + dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; - memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); - if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { - dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; - dctx->stage = ZSTDds_decodeFrameHeader; - return 0; - } - dctx->expected = 0; /* not necessary to copy more */ - /* fall-through */ + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = dctx->headerSize - srcSize; + dctx->stage = ZSTDds_decodeFrameHeader; + return 0; + case ZSTDds_decodeFrameHeader: assert(src != NULL); - memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); dctx->expected = ZSTD_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; case ZSTDds_decodeBlockHeader: { blockProperties_t bp; size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(cBlockSize)) return cBlockSize; dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->rleSize = bp.origSize; if (cBlockSize) { dctx->stage = bp.lastBlock ? 
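/* Worked block-header example for the ZSTD_getcBlockSize() call above
 * (illustrative bytes) : a 3-byte header 45 1F 00 reads little-endian as
 * cBlockHeader = 0x1F45 = 8005, so lastBlock = 8005 & 1 = 1,
 * blockType = (8005 >> 1) & 3 = 2 (bt_compressed), and
 * cSize = 8005 >> 3 = 1000 bytes to request next. */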
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; return 0; } /* empty block */ if (bp.lastBlock) { if (dctx->fParams.checksumFlag) { dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* end of frame */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ dctx->stage = ZSTDds_decodeBlockHeader; } return 0; } + case ZSTDds_decompressLastBlock: case ZSTDds_decompressBlock: DEBUGLOG(5, "case ZSTDds_decompressBlock"); { size_t rSize; switch(dctx->bType) { case bt_compressed: DEBUGLOG(5, "case bt_compressed"); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); break; case bt_raw : rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break; case bt_rle : rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break; case bt_reserved : /* should never happen */ default: return ERROR(corruption_detected); } if (ZSTD_isError(rSize)) return rSize; + DEBUGLOG(5, "decoded size from block : %u", (U32)rSize); + dctx->decodedSize += rSize; if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ + DEBUGLOG(4, "decoded size from frame : %u", (U32)dctx->decodedSize); + if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { + if (dctx->decodedSize != dctx->fParams.frameContentSize) { + return ERROR(corruption_detected); + } } if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* ends here */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTD_blockHeaderSize; dctx->previousDstEnd = (char*)dst + rSize; } return rSize; } + case ZSTDds_checkChecksum: + assert(srcSize == 4); /* guaranteed by dctx->expected */ { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); - U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ + U32 const check32 = MEM_readLE32(src); + DEBUGLOG(4, "checksum : calculated %08X :: %08X read", h32, check32); if (check32 != h32) return ERROR(checksum_wrong); dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } + case ZSTDds_decodeSkippableHeader: - { assert(src != NULL); - memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); - dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); - dctx->stage = ZSTDds_skipFrame; - return 0; - } + assert(src != NULL); + assert(srcSize <= ZSTD_skippableHeaderSize); + memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */ + dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize); /* note : dctx->expected can grow seriously large, beyond local buffer size */ + dctx->stage = ZSTDds_skipFrame; + return 0; + case ZSTDds_skipFrame: - { dctx->expected = 0; - dctx->stage = ZSTDds_getFrameHeaderSize; - return 0; - } + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + default: return ERROR(GENERIC); /* impossible */ } } static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); dctx->base = dict; dctx->previousDstEnd = (const 
char*)dict + dictSize; return 0; } /* ZSTD_loadEntropy() : * dict : must point at beginning of a valid zstd dictionary * @return : size of entropy tables read */ static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize) { const BYTE* dictPtr = (const BYTE*)dict; const BYTE* const dictEnd = dictPtr + dictSize; if (dictSize <= 8) return ERROR(dictionary_corrupted); dictPtr += 8; /* skip header = magic + dictID */ { size_t const hSize = HUF_readDTableX4_wksp( entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace)); if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); dictPtr += hSize; } { short offcodeNCount[MaxOff+1]; U32 offcodeMaxValue = MaxOff, offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); CHECK_E(FSE_buildDTable(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); CHECK_E(FSE_buildDTable(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); CHECK_E(FSE_buildDTable(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted); dictPtr += litlengthHeaderSize; } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted); entropy->rep[i] = rep; } } return dictPtr - (const BYTE*)dict; } static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); { U32 const magic = MEM_readLE32(dict); if (magic != ZSTD_MAGIC_DICTIONARY) { return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } - dctx->dictID = MEM_readLE32((const char*)dict + 4); + dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize); /* load entropy tables */ { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); dict = (const char*)dict + eSize; dictSize -= eSize; } dctx->litEntropy = dctx->fseEntropy = 1; /* reference dictionary content */ return ZSTD_refDictContent(dctx, dict, dictSize); } +/* Note : this function cannot fail */ +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +{ + assert(dctx != NULL); + dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ + 
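/* The assignments below return the context to a pristine state : the stage
 * machine is rewound to the frame-header step, repcode history is reset to
 * repStartValue, and every entropy-table pointer is re-aimed at the
 * context's own tables, so state from a previous dictionary or frame cannot
 * leak into the next one. */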
dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->decodedSize = 0; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + dctx->vBase = NULL; + dctx->dictEnd = NULL; + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + dctx->LLTptr = dctx->entropy.LLTable; + dctx->MLTptr = dctx->entropy.MLTable; + dctx->OFTptr = dctx->entropy.OFTable; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; +} + size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { CHECK_F( ZSTD_decompressBegin(dctx) ); if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); return 0; } /* ====== ZSTD_DDict ====== */ struct ZSTD_DDict_s { void* dictBuffer; const void* dictContent; size_t dictSize; ZSTD_entropyDTables_t entropy; U32 dictID; U32 entropyPresent; ZSTD_customMem cMem; }; /* typedef'd to ZSTD_DDict within "zstd.h" */ static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict) { return ddict->dictContent; } static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict) { return ddict->dictSize; } size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict) { CHECK_F( ZSTD_decompressBegin(dstDCtx) ); if (ddict) { /* support begin on NULL */ dstDCtx->dictID = ddict->dictID; dstDCtx->base = ddict->dictContent; dstDCtx->vBase = ddict->dictContent; dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; dstDCtx->previousDstEnd = dstDCtx->dictEnd; if (ddict->entropyPresent) { dstDCtx->litEntropy = 1; dstDCtx->fseEntropy = 1; dstDCtx->LLTptr = ddict->entropy.LLTable; dstDCtx->MLTptr = ddict->entropy.MLTable; dstDCtx->OFTptr = ddict->entropy.OFTable; dstDCtx->HUFptr = ddict->entropy.hufTable; dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; } else { dstDCtx->litEntropy = 0; dstDCtx->fseEntropy = 0; } } return 0; } static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict) { ddict->dictID = 0; ddict->entropyPresent = 0; if (ddict->dictSize < 8) return 0; { U32 const magic = MEM_readLE32(ddict->dictContent); if (magic != ZSTD_MAGIC_DICTIONARY) return 0; /* pure content mode */ } - ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize); /* load entropy tables */ CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted ); ddict->entropyPresent = 1; return 0; } -static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, unsigned byReference) +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) { - if ((byReference) || (!dict) || (!dictSize)) { + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { ddict->dictBuffer = NULL; ddict->dictContent = dict; } else { void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem); ddict->dictBuffer = internalBuffer; ddict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; ddict->entropy.hufTable[0] = 
(HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ CHECK_F( ZSTD_loadEntropy_inDDict(ddict) ); return 0; } -ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_customMem customMem) { if (!customMem.customAlloc ^ !customMem.customFree) return NULL; { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); if (!ddict) return NULL; ddict->cMem = customMem; - if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, byReference) )) { + if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod) )) { ZSTD_freeDDict(ddict); return NULL; } return ddict; } } /*! ZSTD_createDDict() : * Create a digested dictionary, to start decompression without startup delay. * `dict` content is copied inside DDict. * Consequently, `dict` can be released after `ZSTD_DDict` creation */ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; - return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator); + return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, allocator); } /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, to start decompression without startup delay. * Dictionary content is simply referenced, it will be accessed during decompression. * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; - return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator); + return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, allocator); } ZSTD_DDict* ZSTD_initStaticDDict(void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, - unsigned byReference) + ZSTD_dictLoadMethod_e dictLoadMethod) { - size_t const neededSpace = sizeof(ZSTD_DDict) + (byReference ? 0 : dictSize); + size_t const neededSpace = + sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace; assert(workspace != NULL); assert(dict != NULL); if ((size_t)workspace & 7) return NULL; /* 8-aligned */ if (workspaceSize < neededSpace) return NULL; - if (!byReference) { + if (dictLoadMethod == ZSTD_dlm_byCopy) { memcpy(ddict+1, dict, dictSize); /* local copy */ dict = ddict+1; } - if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, 1 /* byRef */) )) + if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef) )) return NULL; return ddict; } size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = ddict->cMem; ZSTD_free(ddict->dictBuffer, cMem); ZSTD_free(ddict, cMem); return 0; } } /*! ZSTD_estimateDDictSize() : * Estimate amount of memory that will be needed to create a dictionary for decompression. - * Note : dictionary created "byReference" are smaller */ -size_t ZSTD_estimateDDictSize(size_t dictSize, unsigned byReference) + * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) { - return sizeof(ZSTD_DDict) + (byReference ? 
0 : dictSize); + return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); } size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support sizeof on NULL */ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; } /*! ZSTD_getDictID_fromDict() : * Provides the dictID stored within dictionary. * If @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; - return MEM_readLE32((const char*)dict + 4); + return MEM_readLE32((const char*)dict + ZSTD_frameIdSize); } /*! ZSTD_getDictID_fromDDict() : * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); } /*! ZSTD_getDictID_fromFrame() : * Provides the dictID required to decompress a frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could be for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. * The needed dictionary is hidden information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. * - This is not a Zstandard frame. * When identifying the exact failure cause, it's possible to use * ZSTD_getFrameHeader(), which will provide a more precise error code. */ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, ZSTD_frame, 0, 0, 0 }; + ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; } /*! ZSTD_decompress_usingDDict() : * Decompression using a pre-digested dictionary. * Uses the dictionary without significant overhead.
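* Typical usage, for illustration (allocation and error checks elided) :
*   ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
*   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuffer, dictSize);
*   size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
*                                                   src, srcSize, ddict);
*   ... test dSize with ZSTD_isError(), then release with ZSTD_freeDDict()
*   and ZSTD_freeDCtx() ...
* dictBuffer/dictSize and the dst/src buffers are hypothetical caller-side
* names; ZSTD_getDictID_fromFrame(src, srcSize) can be compared against
* ZSTD_getDictID_fromDDict(ddict) beforehand to pick the right dictionary.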
*/ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_DDict* ddict) { /* pass content and size in case legacy frames are encountered */ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); } /*===================================== * Streaming decompression *====================================*/ ZSTD_DStream* ZSTD_createDStream(void) { return ZSTD_createDStream_advanced(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticDCtx(workspace, workspaceSize); } ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { return ZSTD_createDCtx_advanced(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) { return ZSTD_freeDCtx(zds); } /* *** Initialization *** */ size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) { zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; ZSTD_freeDDict(zds->ddictLocal); if (dict && dictSize >= 8) { zds->ddictLocal = ZSTD_createDDict(dict, dictSize); if (zds->ddictLocal == NULL) return ERROR(memory_allocation); } else zds->ddictLocal = NULL; zds->ddict = zds->ddictLocal; zds->legacyVersion = 0; zds->hostageByte = 0; return ZSTD_frameHeaderSize_prefix; } +/* note : this variant can't fail */ size_t ZSTD_initDStream(ZSTD_DStream* zds) { return ZSTD_initDStream_usingDict(zds, NULL, 0); } /* ZSTD_initDStream_usingDDict() : - * ddict will just be referenced, and must outlive decompression session */ + * ddict will just be referenced, and must outlive decompression session + * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict) { size_t const initResult = ZSTD_initDStream(zds); zds->ddict = ddict; return initResult; } size_t ZSTD_resetDStream(ZSTD_DStream* zds) { zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; zds->legacyVersion = 0; zds->hostageByte = 0; return ZSTD_frameHeaderSize_prefix; } size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue) { + ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init); + if ((unsigned)zds->streamStage > (unsigned)zdss_loadHeader) + return ERROR(stage_wrong); switch(paramType) { default : return ERROR(parameter_unsupported); - case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break; + case DStream_p_maxWindowSize : + DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10); + zds->maxWindowSize = paramValue ? 
paramValue : (U32)(-1); + break; } return 0; } +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) +{ + ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init); + if ((unsigned)dctx->streamStage > (unsigned)zdss_loadHeader) + return ERROR(stage_wrong); + dctx->maxWindowSize = maxWindowSize; + return 0; +} +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) +{ + DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format); + ZSTD_STATIC_ASSERT((unsigned)zdss_loadHeader >= (unsigned)zdss_init); + if ((unsigned)dctx->streamStage > (unsigned)zdss_loadHeader) + return ERROR(stage_wrong); + dctx->format = format; + return 0; +} + + size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds) { return ZSTD_sizeof_DCtx(zds); } +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +{ + size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); + size_t const minRBSize = (size_t) neededSize; + if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge); + return minRBSize; +} + size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); size_t const inBuffSize = blockSize; /* no block can be larger */ - size_t const outBuffSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; } -ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { - U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */ ZSTD_frameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; if (err>0) return ERROR(srcSize_wrong); if (zfh.windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge); return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); } /* ***** Decompression ***** */ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { size_t const length = MIN(dstCapacity, srcSize); memcpy(dst, src, length); return length; } size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { const char* const istart = (const char*)(input->src) + input->pos; const char* const iend = (const char*)(input->src) + input->size; const char* ip = istart; char* const ostart = (char*)(output->dst) + output->pos; char* const oend = (char*)(output->dst) + output->size; char* op = ostart; U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); + if (input->pos > input->size) { /* forbidden */ + DEBUGLOG(5, "in: pos: %u vs size: %u", + (U32)input->pos, (U32)input->size); + return ERROR(srcSize_wrong); + } + if (output->pos > output->size) { /* forbidden */ + DEBUGLOG(5, "out: pos: %u vs size: %u", + (U32)output->pos, (U32)output->size); + return ERROR(dstSize_tooSmall); + } DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); + #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) if (zds->legacyVersion) { /* legacy support is incompatible with static dctx */ if (zds->staticSize) 
return ERROR(memory_allocation); return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); } #endif while (someMoreWork) { switch(zds->streamStage) { case zdss_init : ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ /* fall-through */ case zdss_loadHeader : - { size_t const hSize = ZSTD_getFrameHeader(&zds->fParams, zds->headerBuffer, zds->lhSize); + DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); + { size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + DEBUGLOG(5, "header size : %u", (U32)hSize); if (ZSTD_isError(hSize)) { #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); if (legacyVersion) { const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL; size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0; /* legacy support is incompatible with static dctx */ if (zds->staticSize) return ERROR(memory_allocation); CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, dict, dictSize)); zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; return ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); } #endif return hSize; /* error */ } if (hSize != 0) { /* need more input */ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ if (iend-ip > 0) { memcpy(zds->headerBuffer + zds->lhSize, ip, iend-ip); zds->lhSize += iend-ip; } input->pos = input->size; return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; break; } } /* check for single-pass mode opportunity */ if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); if (cSize <= (size_t)(iend-istart)) { size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict); if (ZSTD_isError(decompressedSize)) return decompressedSize; ip = istart + cSize; op += decompressedSize; zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; break; } } /* Consume header (see ZSTDds_decodeFrameHeader) */ DEBUGLOG(4, "Consume header"); CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict)); if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ - zds->expected = MEM_readLE32(zds->headerBuffer + 4); + zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize); zds->stage = ZSTDds_skipFrame; } else { CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize)); zds->expected = ZSTD_blockHeaderSize; zds->stage = ZSTDds_decodeBlockHeader; } /* control buffer memory usage */ - DEBUGLOG(4, "Control max buffer memory usage"); + DEBUGLOG(4, "Control max buffer memory usage (max %u KB)", + (U32)(zds->maxWindowSize >> 10)); zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); /* Adapt buffer sizes to frame header instructions */ - { size_t const blockSize = 
(size_t)(MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_MAX)); - size_t const neededOutSize = (size_t)(zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2); - zds->blockSize = blockSize; - if ((zds->inBuffSize < blockSize) || (zds->outBuffSize < neededOutSize)) { - size_t const bufferSize = blockSize + neededOutSize; + { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); + size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize); + if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) { + size_t const bufferSize = neededInBuffSize + neededOutBuffSize; DEBUGLOG(4, "inBuff : from %u to %u", - (U32)zds->inBuffSize, (U32)blockSize); + (U32)zds->inBuffSize, (U32)neededInBuffSize); DEBUGLOG(4, "outBuff : from %u to %u", - (U32)zds->outBuffSize, (U32)neededOutSize); + (U32)zds->outBuffSize, (U32)neededOutBuffSize); if (zds->staticSize) { /* static DCtx */ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx)) return ERROR(memory_allocation); } else { ZSTD_free(zds->inBuff, zds->customMem); zds->inBuffSize = 0; zds->outBuffSize = 0; zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem); if (zds->inBuff == NULL) return ERROR(memory_allocation); } - zds->inBuffSize = blockSize; + zds->inBuffSize = neededInBuffSize; zds->outBuff = zds->inBuff + zds->inBuffSize; - zds->outBuffSize = neededOutSize; + zds->outBuffSize = neededOutBuffSize; } } zds->streamStage = zdss_read; /* fall-through */ case zdss_read: DEBUGLOG(5, "stage zdss_read"); { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); if (neededInSize==0) { /* end of frame */ zds->streamStage = zdss_init; someMoreWork = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t const decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, (isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart), ip, neededInSize); if (ZSTD_isError(decodedSize)) return decodedSize; ip += neededInSize; if (!decodedSize && !isSkipFrame) break; /* this was just a header */ zds->outEnd = zds->outStart + decodedSize; zds->streamStage = zdss_flush; break; } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; /* fall-through */ case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); ip += loadedSize; zds->inPos += loadedSize; if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ { const int isSkipFrame = ZSTD_isSkipFrame(zds); size_t const decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, zds->inBuff, neededInSize); if (ZSTD_isError(decodedSize)) return decodedSize; zds->inPos = 0; /* input is consumed */ if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */ zds->outEnd = zds->outStart + decodedSize; } } zds->streamStage = zdss_flush; /* fall-through */ case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); op += flushedSize; zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; - if (zds->outStart + zds->blockSize > zds->outBuffSize) + if ( (zds->outBuffSize < zds->fParams.frameContentSize) + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", + (int)(zds->outBuffSize - zds->outStart), + (U32)zds->fParams.blockSizeMax); zds->outStart = zds->outEnd = 0; + } break; } } /* cannot complete flush */ someMoreWork = 0; break; default: return ERROR(GENERIC); /* impossible */ } } /* result */ input->pos += (size_t)(ip-istart); output->pos += (size_t)(op-ostart); { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); if (!nextSrcSizeHint) { /* frame fully decoded */ if (zds->outEnd == zds->outStart) { /* output fully flushed */ if (zds->hostageByte) { if (input->pos >= input->size) { /* can't release hostage (not present) */ zds->streamStage = zdss_read; return 1; } input->pos++; /* release hostage */ } /* zds->hostageByte */ return 0; } /* zds->outEnd == zds->outStart */ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ zds->hostageByte=1; } return 1; } /* nextSrcSizeHint==0 */ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ if (zds->inPos > nextSrcSizeHint) return ERROR(GENERIC); /* should never happen */ nextSrcSizeHint -= zds->inPos; /* already loaded*/ return nextSrcSizeHint; } +} + + +size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + return ZSTD_decompressStream(dctx, output, input); +} + +size_t ZSTD_decompress_generic_simpleArgs ( + ZSTD_DCtx* dctx, + void* dst, size_t 
dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos) +{ + ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; + ZSTD_inBuffer input = { src, srcSize, *srcPos }; + /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ + size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; +} + +void ZSTD_DCtx_reset(ZSTD_DCtx* dctx) +{ + (void)ZSTD_initDStream(dctx); + dctx->format = ZSTD_f_zstd1; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; } Index: vendor/zstd/dist/lib/deprecated/zbuff.h =================================================================== --- vendor/zstd/dist/lib/deprecated/zbuff.h (revision 325596) +++ vendor/zstd/dist/lib/deprecated/zbuff.h (revision 325597) @@ -1,212 +1,213 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* *************************************************************** * NOTES/WARNINGS ******************************************************************/ /* The streaming API defined here is deprecated. * Consider migrating towards ZSTD_compressStream() API in `zstd.h` * See 'lib/README.md'. *****************************************************************/ #if defined (__cplusplus) extern "C" { #endif #ifndef ZSTD_BUFFERED_H_23987 #define ZSTD_BUFFERED_H_23987 /* ************************************* * Dependencies ***************************************/ #include <stddef.h> /* size_t */ #include "zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ /* *************************************************************** * Compiler specifics *****************************************************************/ /* Deprecation warnings */ /* Should these warnings be a problem, it is generally possible to disable them, typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS # define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ #else # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API # elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) # elif defined(__GNUC__) && (__GNUC__ >= 3) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) # elif defined(_MSC_VER) # define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler") # define ZBUFF_DEPRECATED(message) ZSTDLIB_API # endif #endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */ /* ************************************* * Streaming functions ***************************************/ /* This is the easier "buffered" streaming API, * using an internal buffer to lift all restrictions on user-provided buffers * which can be any size, any place, for both input and output.
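* (For new code, the non-deprecated equivalent is a ZSTD_decompressStream()
*  loop over ZSTD_inBuffer/ZSTD_outBuffer pairs; an abbreviated sketch,
*  assuming an initialized ZSTD_DStream* zds and caller-owned buffers :
*    ZSTD_inBuffer in = { src, srcSize, 0 };
*    while (in.pos < in.size) {
*        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
*        size_t const r = ZSTD_decompressStream(zds, &out, &in);
*        ... check r with ZSTD_isError(), consume out.pos bytes of dst ...
*    }
*  The ZBUFF wrappers below merely adapt their pointer/size-pointer
*  convention to these structures.)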
* ZBUFF and ZSTD are 100% interoperable, * frames created by one can be decoded by the other one */ typedef ZSTD_CStream ZBUFF_CCtx; ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void); ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx); ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel); ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); /*-************************************************* * Streaming compression - howto * * A ZBUFF_CCtx object is required to track streaming operation. * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. * ZBUFF_CCtx objects can be reused multiple times. * * Start by initializing the ZBUFF_CCtx. * Use ZBUFF_compressInit() to start a new compression operation. * Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary. * * Use ZBUFF_compressContinue() repetitively to consume input stream. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present the remaining data again. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters, or change `dst`. * @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency) * or an error code, which can be tested using ZBUFF_isError(). * * At any moment, it's possible to flush whatever data remains within the buffer, using ZBUFF_compressFlush(). * The nb of bytes written into `dst` will be reported in *dstCapacityPtr. * Note that the function cannot output more than *dstCapacityPtr, * therefore, some content might still be left in the internal buffer if *dstCapacityPtr is too small. * @return : nb of bytes still present in the internal buffer (0 if empty) * or an error code, which can be tested using ZBUFF_isError(). * * ZBUFF_compressEnd() instructs to finish a frame. * It will perform a flush and write the frame epilogue. * The epilogue is required for decoders to consider a frame completed. * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. * In that case, call ZBUFF_compressFlush() again to complete the flush. * @return : nb of bytes still present in the internal buffer (0 if empty) * or an error code, which can be tested using ZBUFF_isError(). * * Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize() * input : ZBUFF_recommendedCInSize == 128 KB; the block size is the internal unit, use this value to reduce intermediate stages (better latency) * output : ZBUFF_recommendedCOutSize == ZSTD_compressBound(128 KB) + 3 + 3 ; ensures it's always possible to write/flush/end a full block, skipping some buffering.
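* (An abbreviated call-pattern sketch of the above, with error checks via
*  ZBUFF_isError() elided :
*    size_t dstSize = dstCapacity, srcRead = srcSize;
*    size_t const hint = ZBUFF_compressContinue(cctx, dst, &dstSize,
*                                               src, &srcRead);
*  On return, dstSize holds the bytes written and srcRead the bytes
*  consumed; if srcRead < srcSize, present src + srcRead on the next call.
*  dstCapacity/srcSize are hypothetical caller-side names.)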
* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering. * **************************************************/ typedef ZSTD_DStream ZBUFF_DCtx; ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void); ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx); ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx); ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize); ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); /*-*************************************************************************** * Streaming decompression howto * * A ZBUFF_DCtx object is required to track streaming operations. * Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. * Use ZBUFF_decompressInit() to start a new decompression operation, * or ZBUFF_decompressInitDictionary() if decompression requires a dictionary. * Note that ZBUFF_DCtx objects can be re-initialized multiple times. * * Use ZBUFF_decompressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. * The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. * @return : 0 when a frame is completely decoded and fully flushed, * 1 when there is still some data left within the internal buffer to flush, * >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency), * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize() * output : ZBUFF_recommendedDOutSize == 128 KB ; the block size is the internal unit, ensuring it's always possible to write a full block when decoded. * input : ZBUFF_recommendedDInSize == 128 KB + 3; * just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . * *******************************************************************************/ /* ************************************* * Tool functions ***************************************/ ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode); ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode); /** Functions below provide recommended buffer sizes for Compression or Decompression operations.
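* (For illustration : a typical caller allocates
*    in  = malloc(ZBUFF_recommendedDInSize());   e.g. 128 KB + 3
*    out = malloc(ZBUFF_recommendedDOutSize());  e.g. 128 KB
*  so each ZBUFF_decompressContinue() call can accept a whole compressed
*  block and emit a whole decoded block without extra internal buffering;
*  `in`/`out` are hypothetical names.)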
* These sizes are just hints, they tend to offer better latency */ ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void); ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void); ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void); ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void); #endif /* ZSTD_BUFFERED_H_23987 */ #ifdef ZBUFF_STATIC_LINKING_ONLY #ifndef ZBUFF_STATIC_H_30298098432 #define ZBUFF_STATIC_H_30298098432 /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used in association with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. * ==================================================================================== */ /*--- Dependency ---*/ #define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ #include "zstd.h" /*--- Custom memory allocator ---*/ /*! ZBUFF_createCCtx_advanced() : * Create a ZBUFF compression context using external alloc and free functions */ ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem); /*! ZBUFF_createDCtx_advanced() : * Create a ZBUFF decompression context using external alloc and free functions */ ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem); /*--- Advanced Streaming Initialization ---*/ ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); #endif /* ZBUFF_STATIC_H_30298098432 */ #endif /* ZBUFF_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif Index: vendor/zstd/dist/lib/deprecated/zbuff_common.c =================================================================== --- vendor/zstd/dist/lib/deprecated/zbuff_common.c (revision 325596) +++ vendor/zstd/dist/lib/deprecated/zbuff_common.c (revision 325597) @@ -1,25 +1,26 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /*-************************************* * Dependencies ***************************************/ #include "error_private.h" #include "zbuff.h" /*-**************************************** * ZBUFF Error Management (deprecated) ******************************************/ /*! ZBUFF_isError() : * tells if a return value is an error code */ unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); } /*! ZBUFF_getErrorName() : * provides error code string from function result (useful for debugging) */ const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } Index: vendor/zstd/dist/lib/deprecated/zbuff_compress.c =================================================================== --- vendor/zstd/dist/lib/deprecated/zbuff_compress.c (revision 325596) +++ vendor/zstd/dist/lib/deprecated/zbuff_compress.c (revision 325597) @@ -1,145 +1,146 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. 
* All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* ************************************* * Dependencies ***************************************/ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" /*-*********************************************************** * Streaming compression * * A ZBUFF_CCtx object is required to track streaming operation. * Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. * Use ZBUFF_compressInit() to start a new compression operation. * ZBUFF_CCtx objects can be reused multiple times. * * Use ZBUFF_compressContinue() repetitively to consume your input. * *srcSizePtr and *dstCapacityPtr can be any size. * The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. * Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input. * The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change dst. * @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) * or an error code, which can be tested using ZBUFF_isError(). * * ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer. * Note that it will not output more than *dstCapacityPtr. * Therefore, some content might still be left in its internal buffer if the dst buffer is too small. * @return : nb of bytes still present in the internal buffer (0 if empty) * or an error code, which can be tested using ZBUFF_isError(). * * ZBUFF_compressEnd() instructs to finish a frame. * It will perform a flush and write the frame epilogue. * Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. * @return : nb of bytes still present in the internal buffer (0 if empty) * or an error code, which can be tested using ZBUFF_isError(). * * Hint : recommended buffer sizes (not compulsory) * input : ZSTD_BLOCKSIZE_MAX (128 KB), the internal unit size; using this value improves latency. * output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
* ***********************************************************/ ZBUFF_CCtx* ZBUFF_createCCtx(void) { return ZSTD_createCStream(); } ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem) { return ZSTD_createCStream_advanced(customMem); } size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc) { return ZSTD_freeCStream(zbc); } /* ====== Initialization ====== */ size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize) { return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize); } size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel) { return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel); } size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel) { return ZSTD_initCStream(zbc, compressionLevel); } /* ====== Compression ====== */ size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { size_t result; ZSTD_outBuffer outBuff; ZSTD_inBuffer inBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; inBuff.src = src; inBuff.pos = 0; inBuff.size = *srcSizePtr; result = ZSTD_compressStream(zbc, &outBuff, &inBuff); *dstCapacityPtr = outBuff.pos; *srcSizePtr = inBuff.pos; return result; } /* ====== Finalize ====== */ size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) { size_t result; ZSTD_outBuffer outBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; result = ZSTD_flushStream(zbc, &outBuff); *dstCapacityPtr = outBuff.pos; return result; } size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) { size_t result; ZSTD_outBuffer outBuff; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; result = ZSTD_endStream(zbc, &outBuff); *dstCapacityPtr = outBuff.pos; return result; } /* ************************************* * Tool functions ***************************************/ size_t ZBUFF_recommendedCInSize(void) { return ZSTD_CStreamInSize(); } size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); } Index: vendor/zstd/dist/lib/deprecated/zbuff_decompress.c =================================================================== --- vendor/zstd/dist/lib/deprecated/zbuff_decompress.c (revision 325596) +++ vendor/zstd/dist/lib/deprecated/zbuff_decompress.c (revision 325597) @@ -1,74 +1,75 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
*/ /* ************************************* * Dependencies ***************************************/ #define ZBUFF_STATIC_LINKING_ONLY #include "zbuff.h" ZBUFF_DCtx* ZBUFF_createDCtx(void) { return ZSTD_createDStream(); } ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem) { return ZSTD_createDStream_advanced(customMem); } size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd) { return ZSTD_freeDStream(zbd); } /* *** Initialization *** */ size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize) { return ZSTD_initDStream_usingDict(zbd, dict, dictSize); } size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd) { return ZSTD_initDStream(zbd); } /* *** Decompression *** */ size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr) { ZSTD_outBuffer outBuff; ZSTD_inBuffer inBuff; size_t result; outBuff.dst = dst; outBuff.pos = 0; outBuff.size = *dstCapacityPtr; inBuff.src = src; inBuff.pos = 0; inBuff.size = *srcSizePtr; result = ZSTD_decompressStream(zbd, &outBuff, &inBuff); *dstCapacityPtr = outBuff.pos; *srcSizePtr = inBuff.pos; return result; } /* ************************************* * Tool functions ***************************************/ size_t ZBUFF_recommendedDInSize(void) { return ZSTD_DStreamInSize(); } size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); } Index: vendor/zstd/dist/lib/dictBuilder/cover.c =================================================================== --- vendor/zstd/dist/lib/dictBuilder/cover.c (revision 325596) +++ vendor/zstd/dist/lib/dictBuilder/cover.c (revision 325597) @@ -1,1034 +1,1045 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* ***************************************************************************** * Constructs a dictionary using a heuristic based on the following paper: * * Liao, Petri, Moffat, Wirth * Effective Construction of Relative Lempel-Ziv Dictionaries * Published in WWW 2016. * * Adapted from code originally written by @ot (Giuseppe Ottaviano). ******************************************************************************/ /*-************************************* * Dependencies ***************************************/ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memset */ #include <time.h> /* clock */ #include "mem.h" /* read */ #include "pool.h" #include "threading.h" #include "zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" /*-************************************* * Constants ***************************************/ #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB)) /*-************************************* * Console display ***************************************/ static int g_displayLevel = 2; #define DISPLAY(...) \ { \ fprintf(stderr, __VA_ARGS__); \ fflush(stderr); \ } #define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ if (displayLevel >= l) { \ DISPLAY(__VA_ARGS__); \ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ #define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) #define LOCALDISPLAYUPDATE(displayLevel, l, ...)
\ if (displayLevel >= l) { \ if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ g_time = clock(); \ DISPLAY(__VA_ARGS__); \ } \ } #define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; static clock_t g_time = 0; /*-************************************* * Hash table *************************************** * A small specialized hash map for storing activeDmers. * The map does not resize, so if it becomes full it will loop forever. * Thus, the map must be large enough to store every value. * The map implements linear probing and keeps its load less than 0.5. */ #define MAP_EMPTY_VALUE ((U32)-1) typedef struct COVER_map_pair_t_s { U32 key; U32 value; } COVER_map_pair_t; typedef struct COVER_map_s { COVER_map_pair_t *data; U32 sizeLog; U32 size; U32 sizeMask; } COVER_map_t; /** * Clear the map. */ static void COVER_map_clear(COVER_map_t *map) { memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); } /** * Initializes a map of the given size. * Returns 1 on success and 0 on failure. * The map must be destroyed with COVER_map_destroy(). * The map is only guaranteed to be large enough to hold size elements. */ static int COVER_map_init(COVER_map_t *map, U32 size) { map->sizeLog = ZSTD_highbit32(size) + 2; map->size = (U32)1 << map->sizeLog; map->sizeMask = map->size - 1; map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); if (!map->data) { map->sizeLog = 0; map->size = 0; return 0; } COVER_map_clear(map); return 1; } /** * Internal hash function */ static const U32 prime4bytes = 2654435761U; static U32 COVER_map_hash(COVER_map_t *map, U32 key) { return (key * prime4bytes) >> (32 - map->sizeLog); } /** * Helper function that returns the index that a key should be placed into. */ static U32 COVER_map_index(COVER_map_t *map, U32 key) { const U32 hash = COVER_map_hash(map, key); U32 i; for (i = hash;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *pos = &map->data[i]; if (pos->value == MAP_EMPTY_VALUE) { return i; } if (pos->key == key) { return i; } } } /** * Returns the pointer to the value for key. * If key is not in the map, it is inserted and the value is set to 0. * The map must not be full. */ static U32 *COVER_map_at(COVER_map_t *map, U32 key) { COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; if (pos->value == MAP_EMPTY_VALUE) { pos->key = key; pos->value = 0; } return &pos->value; } /** * Deletes key from the map if present. */ static void COVER_map_remove(COVER_map_t *map, U32 key) { U32 i = COVER_map_index(map, key); COVER_map_pair_t *del = &map->data[i]; U32 shift = 1; if (del->value == MAP_EMPTY_VALUE) { return; } for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { COVER_map_pair_t *const pos = &map->data[i]; /* If the position is empty we are done */ if (pos->value == MAP_EMPTY_VALUE) { del->value = MAP_EMPTY_VALUE; return; } /* If pos can be moved to del do so */ if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { del->key = pos->key; del->value = pos->value; del = pos; shift = 1; } else { ++shift; } } } /** * Destroys a map that is initialized with COVER_map_init().
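* (Sizing, for reference : COVER_map_init() allocates
*  1 << (ZSTD_highbit32(size) + 2) slots, e.g. size = 1000 gives
*  1 << (9 + 2) = 2048 slots, keeping the load factor 1000/2048 below
*  the 0.5 bound that the linear-probing scheme above requires.)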
*/ static void COVER_map_destroy(COVER_map_t *map) { if (map->data) { free(map->data); } map->data = NULL; map->size = 0; } /*-************************************* * Context ***************************************/ typedef struct { const BYTE *samples; size_t *offsets; const size_t *samplesSizes; size_t nbSamples; U32 *suffix; size_t suffixSize; U32 *freqs; U32 *dmerAt; unsigned d; } COVER_ctx_t; /* We need a global context for qsort... */ static COVER_ctx_t *g_ctx = NULL; /*-************************************* * Helper functions ***************************************/ /** * Returns the sum of the sample sizes. */ static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { size_t sum = 0; size_t i; for (i = 0; i < nbSamples; ++i) { sum += samplesSizes[i]; } return sum; } /** * Returns -1 if the dmer at lp is less than the dmer at rp. * Return 0 if the dmers at lp and rp are equal. * Returns 1 if the dmer at lp is greater than the dmer at rp. */ static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { U32 const lhs = *(U32 const *)lp; U32 const rhs = *(U32 const *)rp; return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); } /** * Faster version for d <= 8. */ static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1); U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask; U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask; if (lhs < rhs) { return -1; } return (lhs > rhs); } /** * Same as COVER_cmp() except ties are broken by pointer value * NOTE: g_ctx must be set to call this function. A global is required because * qsort doesn't take an opaque pointer. */ static int COVER_strict_cmp(const void *lp, const void *rp) { int result = COVER_cmp(g_ctx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Faster version for d <= 8. */ static int COVER_strict_cmp8(const void *lp, const void *rp) { int result = COVER_cmp8(g_ctx, lp, rp); if (result == 0) { result = lp < rp ? -1 : 1; } return result; } /** * Returns the first pointer in [first, last) whose element does not compare * less than value. If no such element exists it returns last. */ static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, size_t value) { size_t count = last - first; while (count != 0) { size_t step = count / 2; const size_t *ptr = first; ptr += step; if (*ptr < value) { first = ++ptr; count -= step + 1; } else { count = step; } } return first; } /** * Generic groupBy function. * Groups an array sorted by cmp into groups with equivalent values. * Calls grp for each group. */ static void COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, int (*cmp)(COVER_ctx_t *, const void *, const void *), void (*grp)(COVER_ctx_t *, const void *, const void *)) { const BYTE *ptr = (const BYTE *)data; size_t num = 0; while (num < count) { const BYTE *grpEnd = ptr + size; ++num; while (num < count && cmp(ctx, ptr, grpEnd) == 0) { grpEnd += size; ++num; } grp(ctx, ptr, grpEnd); ptr = grpEnd; } } /*-************************************* * Cover functions ***************************************/ /** * Called on each group of positions with the same dmer. * Counts the frequency of each dmer and saves it in the suffix array. * Fills `ctx->dmerAt`. */ static void COVER_group(COVER_ctx_t *ctx, const void *group, const void *groupEnd) { /* The group consists of all the positions with the same first d bytes. 
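* A hypothetical example of the offsets/COVER_lower_bound() interplay used
* here : three samples of sizes 10, 5 and 8 give offsets = {0, 10, 15, 23};
* for a position *grpPtr == 12, COVER_lower_bound() returns &offsets[2],
* since 15 is the first offset not less than 12, so curSampleEnd becomes
* 15 : position 12 lies in the second sample.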
*/ const U32 *grpPtr = (const U32 *)group; const U32 *grpEnd = (const U32 *)groupEnd; /* The dmerId is how we will reference this dmer. * This allows us to map the whole dmer space to a much smaller space, the * size of the suffix array. */ const U32 dmerId = (U32)(grpPtr - ctx->suffix); /* Count the number of samples this dmer shows up in */ U32 freq = 0; /* Details */ const size_t *curOffsetPtr = ctx->offsets; const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a * different sample than the last. */ size_t curSampleEnd = ctx->offsets[0]; for (; grpPtr != grpEnd; ++grpPtr) { /* Save the dmerId for this position so we can get back to it. */ ctx->dmerAt[*grpPtr] = dmerId; /* Dictionaries only help for the first reference to the dmer. * After that zstd can reference the match from the previous reference. * So only count each dmer once for each sample it is in. */ if (*grpPtr < curSampleEnd) { continue; } freq += 1; /* Binary search to find the end of the sample *grpPtr is in. * In the common case that grpPtr + 1 == grpEnd we can skip the binary * search because the loop is over. */ if (grpPtr + 1 != grpEnd) { const size_t *sampleEndPtr = COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); curSampleEnd = *sampleEndPtr; curOffsetPtr = sampleEndPtr + 1; } } /* At this point we are never going to look at this segment of the suffix * array again. We take advantage of this fact to save memory. * We store the frequency of the dmer in the first position of the group, * which is dmerId. */ ctx->suffix[dmerId] = freq; } /** * A segment is a range in the source as well as the score of the segment. */ typedef struct { U32 begin; U32 end; - double score; + U32 score; } COVER_segment_t; /** * Selects the best segment in an epoch. * Segments are scored according to the function: * * Let F(d) be the frequency of dmer d. * Let S_i be the dmer at position i of segment S which has length k. * * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) * * Once the dmer d is in the dictionary we set F(d) = 0. */ static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, U32 begin, U32 end, ZDICT_cover_params_t parameters) { /* Constants */ const U32 k = parameters.k; const U32 d = parameters.d; const U32 dmersInK = k - d + 1; /* Try each segment (activeSegment) and save the best (bestSegment) */ COVER_segment_t bestSegment = {0, 0, 0}; COVER_segment_t activeSegment; /* Reset the activeDmers in the segment */ COVER_map_clear(activeDmers); /* The activeSegment starts at the beginning of the epoch. */ activeSegment.begin = begin; activeSegment.end = begin; activeSegment.score = 0; /* Slide the activeSegment through the whole epoch. * Save the best segment in bestSegment. */ while (activeSegment.end < end) { /* The dmerId for the dmer at the next position */ U32 newDmer = ctx->dmerAt[activeSegment.end]; /* The entry in activeDmers for this dmerId */ U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); /* If the dmer isn't already present in the segment add its score. */ if (*newDmerOcc == 0) { /* The paper suggests using the L-0.5 norm, but experiments show that it * doesn't help.
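* A hypothetical worked example of this incremental update : take
* dmersInK = 3 and four distinct dmers with freqs { 5, 0, 7, 2 } arriving
* in order. The window score evolves 5, 5, 12 (window now full), then
* 12 + 2 - 5 = 9 once the oldest dmer is evicted; the best segment kept
* is the one that scored 12. Repeated dmers are counted only once, which
* is what the *newDmerOcc test guarantees.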
*/ activeSegment.score += freqs[newDmer]; } /* Add the dmer to the segment */ activeSegment.end += 1; *newDmerOcc += 1; /* If the window is now too large, drop the first position */ if (activeSegment.end - activeSegment.begin == dmersInK + 1) { U32 delDmer = ctx->dmerAt[activeSegment.begin]; U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); activeSegment.begin += 1; *delDmerOcc -= 1; /* If this is the last occurrence of the dmer, subtract its score */ if (*delDmerOcc == 0) { COVER_map_remove(activeDmers, delDmer); activeSegment.score -= freqs[delDmer]; } } /* If this segment is the best so far, save it */ if (activeSegment.score > bestSegment.score) { bestSegment = activeSegment; } } { /* Trim off the zero frequency head and tail from the segment. */ U32 newBegin = bestSegment.end; U32 newEnd = bestSegment.begin; U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { U32 freq = freqs[ctx->dmerAt[pos]]; if (freq != 0) { newBegin = MIN(newBegin, pos); newEnd = pos + 1; } } bestSegment.begin = newBegin; bestSegment.end = newEnd; } { /* Zero out the frequency of each dmer covered by the chosen segment. */ U32 pos; for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { freqs[ctx->dmerAt[pos]] = 0; } } return bestSegment; } /** * Check the validity of the parameters. * Returns non-zero if the parameters are valid and 0 otherwise. */ -static int COVER_checkParameters(ZDICT_cover_params_t parameters) { +static int COVER_checkParameters(ZDICT_cover_params_t parameters, + size_t maxDictSize) { /* k and d are required parameters */ if (parameters.d == 0 || parameters.k == 0) { return 0; } + /* k <= maxDictSize */ + if (parameters.k > maxDictSize) { + return 0; + } /* d <= k */ if (parameters.d > parameters.k) { return 0; } return 1; } /** * Clean up a context initialized with `COVER_ctx_init()`. */ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { if (!ctx) { return; } if (ctx->suffix) { free(ctx->suffix); ctx->suffix = NULL; } if (ctx->freqs) { free(ctx->freqs); ctx->freqs = NULL; } if (ctx->dmerAt) { free(ctx->dmerAt); ctx->dmerAt = NULL; } if (ctx->offsets) { free(ctx->offsets); ctx->offsets = NULL; } } /** * Prepare a context for dictionary building. * The context is only dependent on the parameter `d` and can be used multiple * times. * Returns 1 on success or zero on error. * The context must be destroyed with `COVER_ctx_destroy()`.
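* (For example, during parameter optimization the same context, built once
*  per value of `d`, is shared across many values of `k` : only the freqs
*  array must be copied per trial, which is exactly what
*  COVER_tryParameters() does below.)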
*/ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, unsigned d) { const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n", (COVER_MAX_SAMPLES_SIZE >> 20)); return 0; } /* Zero the context */ memset(ctx, 0, sizeof(*ctx)); DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples, (U32)totalSamplesSize); ctx->samples = samples; ctx->samplesSizes = samplesSizes; ctx->nbSamples = nbSamples; /* Partial suffix array */ ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1; ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* Maps index to the dmerID */ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* The offsets of each file */ ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); COVER_ctx_destroy(ctx); return 0; } ctx->freqs = NULL; ctx->d = d; /* Fill offsets from the samplesSizes */ { U32 i; ctx->offsets[0] = 0; for (i = 1; i <= nbSamples; ++i) { ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; } } DISPLAYLEVEL(2, "Constructing partial suffix array\n"); { /* suffix is a partial suffix array. * It only sorts suffixes by their first parameters.d bytes. * The sort is stable, so each dmer group is sorted by position in input. */ U32 i; for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } /* qsort doesn't take an opaque pointer, so pass as a global */ g_ctx = ctx; qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): * 1. For each position we set dmerAt[position] = dmerID. The dmerID is * (groupBeginPtr - suffix). This allows us to go from position to * dmerID so we can look up values in freq. * 2. We calculate how many samples the dmer occurs in and save it in * freqs[dmerId]. */ COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group); ctx->freqs = ctx->suffix; ctx->suffix = NULL; return 1; } /** * Given the prepared context, build the dictionary. */ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, COVER_map_t *activeDmers, void *dictBuffer, size_t dictBufferCapacity, ZDICT_cover_params_t parameters) { BYTE *const dict = (BYTE *)dictBuffer; size_t tail = dictBufferCapacity; /* Divide the data up into epochs of equal size. * We will select at least one segment from each epoch. */ const U32 epochs = (U32)(dictBufferCapacity / parameters.k); const U32 epochSize = (U32)(ctx->suffixSize / epochs); size_t epoch; DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs, epochSize); /* Loop through the epochs until there are no more segments or the dictionary * is full.
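* Hypothetical numbers, for scale : dictBufferCapacity = 102400 and
* k = 1024 give epochs = 100; with suffixSize = 10000000, each epoch
* spans epochSize = 100000 dmer positions, visited round-robin until
* tail reaches 0 or a selected segment is too small.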
*/ for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) { const U32 epochBegin = (U32)(epoch * epochSize); const U32 epochEnd = epochBegin + epochSize; size_t segmentSize; /* Select a segment */ COVER_segment_t segment = COVER_selectSegment( ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); - /* Trim the segment if necessary and if it is empty then we are done */ + /* If the segment covers no dmers, then we are out of content */ + if (segment.score == 0) { + break; + } + /* Trim the segment if necessary and if it is too small then we are done */ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); - if (segmentSize == 0) { + if (segmentSize < parameters.d) { break; } /* We fill the dictionary from the back to allow the best segments to be * referenced with the smallest offsets. */ tail -= segmentSize; memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; } ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters) { BYTE *const dict = (BYTE *)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; /* Checks */ - if (!COVER_checkParameters(parameters)) { + if (!COVER_checkParameters(parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); return ERROR(GENERIC); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(GENERIC); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Initialize context and activeDmers */ if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, parameters.d)) { return ERROR(GENERIC); } if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); COVER_ctx_destroy(&ctx); return ERROR(GENERIC); } DISPLAYLEVEL(2, "Building dictionary\n"); { const size_t tail = COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer, dictBufferCapacity, parameters); const size_t dictionarySize = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, samplesBuffer, samplesSizes, nbSamples, parameters.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", (U32)dictionarySize); } COVER_ctx_destroy(&ctx); COVER_map_destroy(&activeDmers); return dictionarySize; } } /** * COVER_best_t is used for two purposes: * 1. Synchronizing threads. * 2. Saving the best parameters and dictionary. * * All of the methods except COVER_best_init() are thread safe if zstd is * compiled with multithreaded support. */ typedef struct COVER_best_s { - pthread_mutex_t mutex; - pthread_cond_t cond; + ZSTD_pthread_mutex_t mutex; + ZSTD_pthread_cond_t cond; size_t liveJobs; void *dict; size_t dictSize; ZDICT_cover_params_t parameters; size_t compressedSize; } COVER_best_t; /** * Initialize the `COVER_best_t`. 
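* The expected lifecycle, for reference : COVER_best_init() once, then
* COVER_best_start() before each trial is launched, COVER_best_finish()
* from the trial when it completes (even on error), COVER_best_wait()
* to block until liveJobs == 0, and finally COVER_best_destroy().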
*/ static void COVER_best_init(COVER_best_t *best) { if (best==NULL) return; /* compatible with init on NULL */ - (void)pthread_mutex_init(&best->mutex, NULL); - (void)pthread_cond_init(&best->cond, NULL); + (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); + (void)ZSTD_pthread_cond_init(&best->cond, NULL); best->liveJobs = 0; best->dict = NULL; best->dictSize = 0; best->compressedSize = (size_t)-1; memset(&best->parameters, 0, sizeof(best->parameters)); } /** * Wait until liveJobs == 0. */ static void COVER_best_wait(COVER_best_t *best) { if (!best) { return; } - pthread_mutex_lock(&best->mutex); + ZSTD_pthread_mutex_lock(&best->mutex); while (best->liveJobs != 0) { - pthread_cond_wait(&best->cond, &best->mutex); + ZSTD_pthread_cond_wait(&best->cond, &best->mutex); } - pthread_mutex_unlock(&best->mutex); + ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ static void COVER_best_destroy(COVER_best_t *best) { if (!best) { return; } COVER_best_wait(best); if (best->dict) { free(best->dict); } - pthread_mutex_destroy(&best->mutex); - pthread_cond_destroy(&best->cond); + ZSTD_pthread_mutex_destroy(&best->mutex); + ZSTD_pthread_cond_destroy(&best->cond); } /** * Called when a thread is about to be launched. * Increments liveJobs. */ static void COVER_best_start(COVER_best_t *best) { if (!best) { return; } - pthread_mutex_lock(&best->mutex); + ZSTD_pthread_mutex_lock(&best->mutex); ++best->liveJobs; - pthread_mutex_unlock(&best->mutex); + ZSTD_pthread_mutex_unlock(&best->mutex); } /** * Called when a thread finishes executing, whether on error or success. * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ static void COVER_best_finish(COVER_best_t *best, size_t compressedSize, ZDICT_cover_params_t parameters, void *dict, size_t dictSize) { if (!best) { return; } { size_t liveJobs; - pthread_mutex_lock(&best->mutex); + ZSTD_pthread_mutex_lock(&best->mutex); --best->liveJobs; liveJobs = best->liveJobs; /* If the new dictionary is better */ if (compressedSize < best->compressedSize) { /* Allocate space if necessary */ if (!best->dict || best->dictSize < dictSize) { if (best->dict) { free(best->dict); } best->dict = malloc(dictSize); if (!best->dict) { best->compressedSize = ERROR(GENERIC); best->dictSize = 0; /* unlock before the early return, otherwise the mutex stays held */ ZSTD_pthread_mutex_unlock(&best->mutex); if (liveJobs == 0) { ZSTD_pthread_cond_broadcast(&best->cond); } return; } } /* Save the dictionary, parameters, and size */ memcpy(best->dict, dict, dictSize); best->dictSize = dictSize; best->parameters = parameters; best->compressedSize = compressedSize; } - pthread_mutex_unlock(&best->mutex); + ZSTD_pthread_mutex_unlock(&best->mutex); if (liveJobs == 0) { - pthread_cond_broadcast(&best->cond); + ZSTD_pthread_cond_broadcast(&best->cond); } } } /** * Parameters for COVER_tryParameters(). */ typedef struct COVER_tryParameters_data_s { const COVER_ctx_t *ctx; COVER_best_t *best; size_t dictBufferCapacity; ZDICT_cover_params_t parameters; } COVER_tryParameters_data_t; /** * Tries a set of parameters and updates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading.
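The owning-pointer convention means the job data is heap-allocated by the producer and freed exactly once by the worker, which is what lets the same entry point run either inline or on a POOL thread. A stripped-down sketch of the pattern (job_data_t and submit are illustrative names, not the library's):

#include <stdlib.h>

typedef struct { unsigned k; unsigned d; } job_data_t;  /* illustrative payload */

/* The worker owns the data and releases it, mirroring how
 * COVER_tryParameters() frees its opaque argument. */
static void worker(void *opaque)
{
    job_data_t *data = (job_data_t *)opaque;
    /* ... use data->k, data->d ... */
    free(data);
}

static int submit(void (*fn)(void *))
{
    job_data_t *data = (job_data_t *)malloc(sizeof(*data));
    if (data == NULL) return -1;
    data->k = 50; data->d = 8;
    fn(data);   /* or hand off with POOL_add(pool, fn, data); ownership transfers either way */
    return 0;
}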
*/ static void COVER_tryParameters(void *opaque) { /* Save parameters as local variables */ COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque; const COVER_ctx_t *const ctx = data->ctx; const ZDICT_cover_params_t parameters = data->parameters; size_t dictBufferCapacity = data->dictBufferCapacity; size_t totalCompressedSize = ERROR(GENERIC); /* Allocate space for hash table, dict, and freqs */ COVER_map_t activeDmers; BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); goto _cleanup; } if (!dict || !freqs) { DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); goto _cleanup; } /* Copy the frequencies because we need to modify them */ memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32)); /* Build the dictionary */ { const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, dictBufferCapacity, parameters); dictBufferCapacity = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, parameters.zParams); if (ZDICT_isError(dictBufferCapacity)) { DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); goto _cleanup; } } /* Check total compressed size */ { /* Pointers */ ZSTD_CCtx *cctx; ZSTD_CDict *cdict; void *dst; /* Local variables */ size_t dstCapacity; size_t i; /* Allocate dst with enough space to compress the maximum sized sample */ { size_t maxSampleSize = 0; for (i = 0; i < ctx->nbSamples; ++i) { maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize); } dstCapacity = ZSTD_compressBound(maxSampleSize); dst = malloc(dstCapacity); } /* Create the cctx and cdict */ cctx = ZSTD_createCCtx(); cdict = ZSTD_createCDict(dict, dictBufferCapacity, parameters.zParams.compressionLevel); if (!dst || !cctx || !cdict) { goto _compressCleanup; } /* Compress each sample and sum their sizes (or error) */ - totalCompressedSize = 0; + totalCompressedSize = dictBufferCapacity; for (i = 0; i < ctx->nbSamples; ++i) { const size_t size = ZSTD_compress_usingCDict( cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i], ctx->samplesSizes[i], cdict); if (ZSTD_isError(size)) { totalCompressedSize = ERROR(GENERIC); goto _compressCleanup; } totalCompressedSize += size; } _compressCleanup: ZSTD_freeCCtx(cctx); ZSTD_freeCDict(cdict); if (dst) { free(dst); } } _cleanup: COVER_best_finish(data->best, totalCompressedSize, parameters, dict, dictBufferCapacity); free(data); COVER_map_destroy(&activeDmers); if (dict) { free(dict); } if (freqs) { free(freqs); } } ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t *parameters) { /* constants */ const unsigned nbThreads = parameters->nbThreads; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; const unsigned kSteps = parameters->steps == 0 ? 
40 : parameters->steps; const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); const unsigned kIterations = (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); /* Local variables */ const int displayLevel = parameters->zParams.notificationLevel; unsigned iteration = 1; unsigned d; unsigned k; COVER_best_t best; POOL_ctx *pool = NULL; /* Checks */ if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(GENERIC); } if (nbSamples == 0) { DISPLAYLEVEL(1, "Cover must have at least one input file\n"); return ERROR(GENERIC); } if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", ZDICT_DICTSIZE_MIN); return ERROR(dstSize_tooSmall); } if (nbThreads > 1) { pool = POOL_create(nbThreads, 1); if (!pool) { return ERROR(memory_allocation); } } /* Initialization */ COVER_best_init(&best); /* Turn down global display level to clean up display at level 2 and below */ - g_displayLevel = parameters->zParams.notificationLevel - 1; + g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1; /* Loop through d first because each new value needs a new context */ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", kIterations); for (d = kMinD; d <= kMaxD; d += 2) { /* Initialize the context for this value of d */ COVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); return ERROR(GENERIC); } /* Loop through k reusing the same context */ for (k = kMinK; k <= kMaxK; k += kStepSize) { /* Prepare the arguments */ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( sizeof(COVER_tryParameters_data_t)); LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); if (!data) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); COVER_best_destroy(&best); COVER_ctx_destroy(&ctx); POOL_free(pool); return ERROR(GENERIC); } data->ctx = &ctx; data->best = &best; data->dictBufferCapacity = dictBufferCapacity; data->parameters = *parameters; data->parameters.k = k; data->parameters.d = d; data->parameters.steps = kSteps; + data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ - if (!COVER_checkParameters(data->parameters)) { + if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { DISPLAYLEVEL(1, "Cover parameters incorrect\n"); free(data); continue; } /* Call the function and pass ownership of data to it */ COVER_best_start(&best); if (pool) { POOL_add(pool, &COVER_tryParameters, data); } else { COVER_tryParameters(data); } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", (U32)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); COVER_ctx_destroy(&ctx); } LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); /* Fill the output buffer and parameters with output of the best parameters */ { const size_t dictSize = best.dictSize; if (ZSTD_isError(best.compressedSize)) { const size_t compressedSize = best.compressedSize; COVER_best_destroy(&best); POOL_free(pool); return compressedSize; } *parameters = best.parameters; memcpy(dictBuffer, best.dict, dictSize); COVER_best_destroy(&best); POOL_free(pool); return dictSize; } } Index: vendor/zstd/dist/lib/dictBuilder/zdict.c =================================================================== --- 
vendor/zstd/dist/lib/dictBuilder/zdict.c (revision 325596) +++ vendor/zstd/dist/lib/dictBuilder/zdict.c (revision 325597) @@ -1,1072 +1,1075 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /*-************************************** * Tuning parameters ****************************************/ #define MINRATIO 4 /* minimum nb of appearances for a segment to be selected into the dictionary */ #define ZDICT_MAX_SAMPLES_SIZE (2000U << 20) #define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO) /*-************************************** * Compiler Options ****************************************/ /* Unix Large Files support (>4GB) */ #define _FILE_OFFSET_BITS 64 #if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */ # define _LARGEFILE_SOURCE #elif ! defined(__LP64__) /* No point defining Large file for 64 bit */ # define _LARGEFILE64_SOURCE #endif /*-************************************* * Dependencies ***************************************/ #include <stdlib.h> /* malloc, free */ #include <string.h> /* memset */ #include <stdio.h> /* fprintf, fopen, ftello64 */ #include <time.h> /* clock */ #include "mem.h" /* read */ #include "fse.h" /* FSE_normalizeCount, FSE_writeNCount */ #define HUF_STATIC_LINKING_ONLY #include "huf.h" /* HUF_buildCTable, HUF_writeCTable */ #include "zstd_internal.h" /* includes zstd.h */ #include "xxhash.h" /* XXH64 */ #include "divsufsort.h" #ifndef ZDICT_STATIC_LINKING_ONLY # define ZDICT_STATIC_LINKING_ONLY #endif #include "zdict.h" /*-************************************* * Constants ***************************************/ #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define DICTLISTSIZE_DEFAULT 10000 #define NOISELENGTH 32 -static const int g_compressionLevel_default = 6; +static const int g_compressionLevel_default = 3; static const U32 g_selectivity_default = 9; /*-************************************* * Console display ***************************************/ #define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } #define DISPLAYLEVEL(l, ...)
if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; } static void ZDICT_printHex(const void* ptr, size_t length) { const BYTE* const b = (const BYTE*)ptr; size_t u; for (u=0; u<length; u++) { BYTE c = b[u]; if (c<32 || c>126) c = '.'; /* non-printable char */ DISPLAY("%c", c); } } /*-******************************************************** * Helper functions **********************************************************/ unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); } const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0; return MEM_readLE32((const char*)dictBuffer + 4); } /*-******************************************************** * Dictionary training functions **********************************************************/ static unsigned ZDICT_NbCommonBytes (register size_t val) { if (MEM_isLittleEndian()) { if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; _BitScanForward64( &r, (U64)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctzll((U64)val) >> 3); # else static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r=0; _BitScanForward( &r, (U32)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; # endif } } else { /* Big Endian CPU */ if (MEM_64bits()) { # if defined(_MSC_VER) && defined(_WIN64) unsigned long r = 0; _BitScanReverse64( &r, val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clzll(val) >> 3); # else unsigned r; const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } r += (!val); return r; # endif } else { /* 32 bits */ # if defined(_MSC_VER) unsigned long r = 0; _BitScanReverse( &r, (unsigned long)val ); return (unsigned)(r>>3); # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clz((U32)val) >> 3); # else unsigned r; if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } r += (!val); return r; # endif } } } /*! ZDICT_count() : Count the nb of common bytes between 2 pointers. Note : this function presumes end of buffer followed by noisy guard band.
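Because of that guard band, ZDICT_count() can compare word-sized chunks with no bounds test: the random noise appended after the buffer guarantees a mismatch before the scan leaves allocated memory. For contrast, a bounds-checked byte-wise equivalent looks like this (a sketch, not the library code):

#include <stddef.h>

/* Sketch: count matching leading bytes with an explicit limit, one byte
 * at a time. ZDICT_count() gets the same answer faster by XORing
 * word-sized reads and relying on the guard band for termination. */
static size_t count_common_bytes(const unsigned char *a, const unsigned char *b, size_t limit)
{
    size_t n = 0;
    while (n < limit && a[n] == b[n]) n++;
    return n;
}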
*/ static size_t ZDICT_count(const void* pIn, const void* pMatch) { const char* const pStart = (const char*)pIn; for (;;) { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); if (!diff) { pIn = (const char*)pIn+sizeof(size_t); pMatch = (const char*)pMatch+sizeof(size_t); continue; } pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); return (size_t)((const char*)pIn - pStart); } } typedef struct { U32 pos; U32 length; U32 savings; } dictItem; static void ZDICT_initDictItem(dictItem* d) { d->pos = 1; d->length = 0; d->savings = (U32)(-1); } #define LLIMIT 64 /* heuristic determined experimentally */ #define MINMATCHLENGTH 7 /* heuristic determined experimentally */ static dictItem ZDICT_analyzePos( BYTE* doneMarks, const int* suffix, U32 start, const void* buffer, U32 minRatio, U32 notificationLevel) { U32 lengthList[LLIMIT] = {0}; U32 cumulLength[LLIMIT] = {0}; U32 savings[LLIMIT] = {0}; const BYTE* b = (const BYTE*)buffer; size_t length; size_t maxLength = LLIMIT; size_t pos = suffix[start]; U32 end = start; dictItem solution; /* init */ memset(&solution, 0, sizeof(solution)); doneMarks[pos] = 1; /* trivial repetition cases */ if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2)) ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3)) ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) { /* skip and mark segment */ U16 u16 = MEM_read16(b+pos+4); U32 u, e = 6; while (MEM_read16(b+pos+e) == u16) e+=2 ; if (b[pos+e] == b[pos+e-1]) e++; for (u=1; u=MINMATCHLENGTH); /* look backward */ do { length = ZDICT_count(b + pos, b + *(suffix+start-1)); if (length >=MINMATCHLENGTH) start--; } while(length >= MINMATCHLENGTH); /* exit if not found a minimum nb of repetitions */ if (end-start < minRatio) { U32 idx; for(idx=start; idx= %i at pos %7u ", (U32)(end-start), MINMATCHLENGTH, (U32)pos); DISPLAYLEVEL(4, "\n"); for (searchLength = MINMATCHLENGTH ; ; searchLength++) { BYTE currentChar = 0; U32 currentCount = 0; U32 currentID = refinedStart; U32 id; U32 selectedCount = 0; U32 selectedID = currentID; for (id =refinedStart; id < refinedEnd; id++) { if (b[ suffix[id] + searchLength] != currentChar) { if (currentCount > selectedCount) { selectedCount = currentCount; selectedID = currentID; } currentID = id; currentChar = b[ suffix[id] + searchLength]; currentCount = 0; } currentCount ++; } if (currentCount > selectedCount) { /* for last */ selectedCount = currentCount; selectedID = currentID; } if (selectedCount < minRatio) break; refinedStart = selectedID; refinedEnd = refinedStart + selectedCount; } /* evaluate gain based on new ref */ start = refinedStart; pos = suffix[refinedStart]; end = start; memset(lengthList, 0, sizeof(lengthList)); /* look forward */ do { end++; length = ZDICT_count(b + pos, b + suffix[end]); if (length >= LLIMIT) length = LLIMIT-1; lengthList[length]++; } while (length >=MINMATCHLENGTH); /* look backward */ length = MINMATCHLENGTH; while ((length >= MINMATCHLENGTH) & (start > 0)) { length = ZDICT_count(b + pos, b + suffix[start - 1]); if (length >= LLIMIT) length = LLIMIT - 1; lengthList[length]++; if (length >= MINMATCHLENGTH) start--; } /* largest useful length */ memset(cumulLength, 0, sizeof(cumulLength)); cumulLength[maxLength-1] = lengthList[maxLength-1]; for (i=(int)(maxLength-2); i>=0; i--) cumulLength[i] = cumulLength[i+1] + lengthList[i]; for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; maxLength = i; /* reduce maxLength in case of final into repetitive data */ { U32 l = (U32)maxLength; BYTE const c = b[pos + maxLength-1]; while (b[pos+l-2]==c) l--; 
maxLength = l; } if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */ /* calculate savings */ savings[5] = 0; for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) savings[i] = savings[i-1] + (lengthList[i] * (i-3)); DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f) \n", (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength); solution.pos = (U32)pos; solution.length = (U32)maxLength; solution.savings = savings[maxLength]; /* mark positions done */ { U32 id; for (id=start; id solution.length) length = solution.length; } pEnd = (U32)(testedPos + length); for (p=testedPos; ppos; const U32 eltEnd = elt.pos + elt.length; const char* const buf = (const char*) buffer; /* tail overlap */ U32 u; for (u=1; u elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */ /* append */ U32 const addedLength = table[u].pos - elt.pos; table[u].length += addedLength; table[u].pos = elt.pos; table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ table[u].savings += elt.length / 8; /* rough approx bonus */ elt = table[u]; /* sort : improve rank */ while ((u>1) && (table[u-1].savings < elt.savings)) table[u] = table[u-1], u--; table[u] = elt; return u; } } /* front overlap */ for (u=1; u= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ /* append */ int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); table[u].savings += elt.length / 8; /* rough approx bonus */ if (addedLength > 0) { /* otherwise, elt fully included into existing */ table[u].length += addedLength; table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ } /* sort : improve rank */ elt = table[u]; while ((u>1) && (table[u-1].savings < elt.savings)) table[u] = table[u-1], u--; table[u] = elt; return u; } if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) { if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); table[u].pos = elt.pos; table[u].savings += (U32)(elt.savings * addedLength / elt.length); table[u].length = MIN(elt.length, table[u].length + 1); return u; } } } return 0; } static void ZDICT_removeDictItem(dictItem* table, U32 id) { - /* convention : first element is nb of elts */ - U32 const max = table->pos; + /* convention : table[0].pos stores nb of elts */ + U32 const max = table[0].pos; U32 u; if (!id) return; /* protection, should never happen */ for (u=id; upos--; } static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer) { /* merge if possible */ U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer); if (mergeId) { U32 newMerge = 1; while (newMerge) { newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer); if (newMerge) ZDICT_removeDictItem(table, mergeId); mergeId = newMerge; } return; } /* insert */ { U32 current; U32 nextElt = table->pos; if (nextElt >= maxSize) nextElt = maxSize-1; current = nextElt-1; while (table[current].savings < elt.savings) { table[current+1] = table[current]; current--; } table[current+1] = elt; table->pos = nextElt+1; } } static U32 ZDICT_dictSize(const dictItem* dictList) { U32 u, dictSize = 0; for (u=1; u=l) { \ if (ZDICT_clockSpan(displayClock) > refreshRate) \ { displayClock = clock(); DISPLAY(__VA_ARGS__); \ if (notificationLevel>=4) fflush(stderr); } } /* init */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ if (!suffix0 || !reverseSuffix 
|| !doneMarks || !filePos) { result = ERROR(memory_allocation); goto _cleanup; } if (minRatio < MINRATIO) minRatio = MINRATIO; memset(doneMarks, 0, bufferSize+16); /* limit sample set size (divsufsort limitation)*/ if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20)); while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles]; /* sort */ DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20)); { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } } suffix[bufferSize] = (int)bufferSize; /* leads into noise */ suffix0[0] = (int)bufferSize; /* leads into noise */ /* build reverse suffix sort */ { size_t pos; for (pos=0; pos < bufferSize; pos++) reverseSuffix[suffix[pos]] = (U32)pos; /* note filePos tracks borders between samples. It's not used at this stage, but planned to become useful in a later update */ filePos[0] = 0; for (pos=1; pos> 21); } } typedef struct { ZSTD_CCtx* ref; ZSTD_CCtx* zc; void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */ } EStats_ress_t; #define MAXREPOFFSET 1024 static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params, U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets, const void* src, size_t srcSize, U32 notificationLevel) { size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog); size_t cSize; if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ { size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0); if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; } } cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; } if (cSize) { /* if == 0; block is not compressible */ const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc); /* literals stats */ { const BYTE* bytePtr; for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) countLit[*bytePtr]++; } /* seqStats */ { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); ZSTD_seqToCodes(seqStorePtr); { const BYTE* codePtr = seqStorePtr->ofCode; U32 u; for (u=0; umlCode; U32 u; for (u=0; ullCode; U32 u; for (u=0; u= 2) { /* rep offsets */ const seqDef* const seq = seqStorePtr->sequencesStart; U32 offset1 = seq[0].offset - 3; U32 offset2 = seq[1].offset - 3; if (offset1 >= MAXREPOFFSET) offset1 = 0; if (offset2 >= MAXREPOFFSET) offset2 = 0; repOffsets[offset1] += 3; repOffsets[offset2] += 1; } } } } static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles) { size_t total=0; unsigned u; for (u=0; u0; u--) { offsetCount_t tmp; if (table[u-1].count >= table[u].count) break; tmp = table[u-1]; table[u-1] = table[u]; table[u] = tmp; } } #define OFFCODE_MAX 30 /* only applicable to first block */ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, unsigned compressionLevel, const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles, const void* dictBuffer, size_t dictBufferSize, unsigned notificationLevel) { U32 countLit[256]; HUF_CREATE_STATIC_CTABLE(hufTable, 255); U32 offcodeCount[OFFCODE_MAX+1]; short offcodeNCount[OFFCODE_MAX+1]; U32 offcodeMax = 
ZSTD_highbit32((U32)(dictBufferSize + 128 KB)); U32 matchLengthCount[MaxML+1]; short matchLengthNCount[MaxML+1]; U32 litLengthCount[MaxLL+1]; short litLengthNCount[MaxLL+1]; U32 repOffset[MAXREPOFFSET]; offsetCount_t bestRepOffset[ZSTD_REP_NUM+1]; EStats_ress_t esr; ZSTD_parameters params; U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total; size_t pos = 0, errorCode; size_t eSize = 0; size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); BYTE* dstPtr = (BYTE*)dstBuffer; /* init */ esr.ref = ZSTD_createCCtx(); esr.zc = ZSTD_createCCtx(); esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX); if (!esr.ref || !esr.zc || !esr.workPlace) { eSize = ERROR(memory_allocation); DISPLAYLEVEL(1, "Not enough memory \n"); goto _cleanup; } if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */ for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */ for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1; for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1; for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1; memset(repOffset, 0, sizeof(repOffset)); repOffset[1] = repOffset[4] = repOffset[8] = 1; memset(bestRepOffset, 0, sizeof(bestRepOffset)); - if (compressionLevel==0) compressionLevel = g_compressionLevel_default; + if (compressionLevel<=0) compressionLevel = g_compressionLevel_default; params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); { size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0); if (ZSTD_isError(beginResult)) { DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult)); eSize = ERROR(GENERIC); goto _cleanup; } } /* collect stats on all files */ for (u=0; u dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; { size_t const dictSize = hSize + dictContentSize; char* dictEnd = (char*)dictBuffer + dictSize; memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); memcpy(dictBuffer, header, hSize); return dictSize; } } size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t params) { int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel; U32 const notificationLevel = params.notificationLevel; size_t hSize = 8; /* calculate entropy tables */ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ DISPLAYLEVEL(2, "statistics ... \n"); { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize, compressionLevel, samplesBuffer, samplesSizes, nbSamples, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, notificationLevel); if (ZDICT_isError(eSize)) return eSize; hSize += eSize; } /* add dictionary header (after entropy tables) */ MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY); { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; U32 const dictID = params.dictID ? 
params.dictID : compliantID; MEM_writeLE32((char*)dictBuffer+4, dictID); } if (hSize + dictContentSize < dictBufferCapacity) memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); return MIN(dictBufferCapacity, hSize+dictContentSize); } /*! ZDICT_trainFromBuffer_unsafe_legacy() : * Warning : `samplesBuffer` must be followed by noisy guard band. * @return : size of dictionary, or an error code which can be tested with ZDICT_isError() */ size_t ZDICT_trainFromBuffer_unsafe_legacy( void* dictBuffer, size_t maxDictSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) { U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16)); dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList)); unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel; unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity; size_t const targetDictSize = maxDictSize; size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); size_t dictSize = 0; U32 const notificationLevel = params.zParams.notificationLevel; /* checks */ if (!dictList) return ERROR(memory_allocation); if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */ if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */ /* init */ ZDICT_initDictItem(dictList); /* build dictionary */ ZDICT_trainBuffer_legacy(dictList, dictListSize, samplesBuffer, samplesBuffSize, samplesSizes, nbSamples, minRep, notificationLevel); /* display best matches */ if (params.zParams.notificationLevel>= 3) { U32 const nb = MIN(25, dictList[0].pos); U32 const dictContentSize = ZDICT_dictSize(dictList); U32 u; DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize); DISPLAYLEVEL(3, "list %u best segments \n", nb-1); for (u=1; u samplesBuffSize) || ((pos + length) > samplesBuffSize)) return ERROR(GENERIC); /* should never happen */ DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", u, length, pos, dictList[u].savings); ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); DISPLAYLEVEL(3, "| \n"); } } /* create dictionary */ { U32 dictContentSize = ZDICT_dictSize(dictList); if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ if (dictContentSize < targetDictSize/4) { DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize); if (samplesBuffSize < 10 * targetDictSize) DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20)); if (minRep > MINRATIO) { DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1); DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n"); } } if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) { U32 proposedSelectivity = selectivity-1; while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; } DISPLAYLEVEL(2, "! 
note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize); DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity); DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n"); } /* limit dictionary size */ { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */ U32 currentSize = 0; U32 n; for (n=1; n<max; n++) { currentSize += dictList[n].length; if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; } } dictList->pos = n; dictContentSize = currentSize; } /* build dict content */ { U32 u; BYTE* ptr = (BYTE*)dictBuffer + maxDictSize; for (u=1; u<dictList->pos; u++) { U32 l = dictList[u].length; ptr -= l; if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */ memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l); } } dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize, samplesBuffer, samplesSizes, nbSamples, params.zParams); } /* clean up */ free(dictList); return dictSize; } /* issue : samplesBuffer needs to be followed by a noisy guard band. * workaround : duplicate the buffer, and add the noise */ size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t params) { size_t result; void* newBuff; size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */ newBuff = malloc(sBuffSize + NOISELENGTH); if (!newBuff) return ERROR(memory_allocation); memcpy(newBuff, samplesBuffer, sBuffSize); ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */ result = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff, samplesSizes, nbSamples, params); free(newBuff); return result; } size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) { ZDICT_cover_params_t params; memset(&params, 0, sizeof(params)); params.d = 8; params.steps = 4; + /* Default to level 6 since no compression level information is available */ + params.zParams.compressionLevel = 6; return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, &params); } size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) { ZDICT_params_t params; memset(&params, 0, sizeof(params)); return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, samplesBuffer, samplesSizes, nbSamples, params); } Index: vendor/zstd/dist/lib/dictBuilder/zdict.h =================================================================== --- vendor/zstd/dist/lib/dictBuilder/zdict.h (revision 325596) +++ vendor/zstd/dist/lib/dictBuilder/zdict.h (revision 325597) @@ -1,210 +1,211 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses.
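The retargeted ZDICT_trainFromBuffer() above now drives the COVER optimizer with d=8, steps=4, and compression level 6, but its calling convention is unchanged. A minimal usage sketch (buffer setup elided, names illustrative):

#include <stdio.h>
#include "zdict.h"

/* Sketch: train a dictionary and report the size actually written.
 * dictBuf/dictCap and the sample arrays are assumed to be prepared
 * by the caller in the flat-buffer layout the API documents. */
static size_t train(void *dictBuf, size_t dictCap,
                    const void *samples, const size_t *sizes, unsigned nb)
{
    size_t const dictSize = ZDICT_trainFromBuffer(dictBuf, dictCap, samples, sizes, nb);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
        return 0;
    }
    return dictSize;  /* number of bytes stored into dictBuf */
}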
*/ #ifndef DICTBUILDER_H_001 #define DICTBUILDER_H_001 #if defined (__cplusplus) extern "C" { #endif /*====== Dependencies ======*/ #include <stddef.h> /* size_t */ /* ===== ZDICTLIB_API : control library symbols visibility ===== */ #ifndef ZDICTLIB_VISIBILITY # if defined(__GNUC__) && (__GNUC__ >= 4) # define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) # else # define ZDICTLIB_VISIBILITY # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) # define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) # define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else # define ZDICTLIB_API ZDICTLIB_VISIBILITY #endif /*! ZDICT_trainFromBuffer(): * Train a dictionary from an array of samples. * Uses ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`. * In general, it's recommended to provide a few thousand samples, but this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); /*====== Helper functions ======*/ ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); #ifdef ZDICT_STATIC_LINKING_ONLY /* ==================================================================================== * The definitions in this section are considered experimental. * They should never be used with a dynamic library, as they may change in the future. * They are provided for advanced usages. * Use them only in association with static linking. * ==================================================================================== */ typedef struct { int compressionLevel; /* 0 means default; target a specific zstd compression level */ unsigned notificationLevel; /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ unsigned dictID; /* 0 means auto mode (32-bits random value); other : force dictID value */ } ZDICT_params_t; /*! ZDICT_cover_params_t: * For all values 0 means default. * k and d are the only required parameters.
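The flat-buffer convention these trainers share (one contiguous samplesBuffer plus a samplesSizes array, in order) can be built in a few lines. A sketch, assuming the destination buffer was sized to the sum of the sample lengths (pack_samples is an illustrative name, not part of the API):

#include <string.h>

/* Sketch: concatenate nb samples into `flat` and record each length in
 * `sizes`, producing exactly the layout ZDICT_trainFromBuffer() expects. */
static size_t pack_samples(char *flat, size_t *sizes,
                           const char *const *samples, const size_t *lens, unsigned nb)
{
    size_t pos = 0;
    unsigned i;
    for (i = 0; i < nb; ++i) {
        memcpy(flat + pos, samples[i], lens[i]);
        sizes[i] = lens[i];   /* size of sample i, in order */
        pos += lens[i];
    }
    return pos;               /* total bytes written = sum of sizes[] */
}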
*/ typedef struct { unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ ZDICT_params_t zParams; } ZDICT_cover_params_t; /*! ZDICT_trainFromBuffer_cover(): * Train a dictionary from an array of samples using the COVER algorithm. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`. * In general, it's recommended to provide a few thousand samples, but this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t parameters); /*! ZDICT_optimizeTrainFromBuffer_cover(): * The same requirements as above hold for all the parameters except `parameters`. * This function tries many parameter combinations and picks the best parameters. * `*parameters` is filled with the best parameters found, and the dictionary * constructed with those parameters is stored in `dictBuffer`. * * All of the parameters d, k, steps are optional. * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. * If steps is zero, it defaults to 40. * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000], split into about `steps` evenly spaced candidates. * * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * On success `*parameters` contains the parameters selected. * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes per input byte for each thread. */ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_cover_params_t *parameters); /*! ZDICT_finalizeDictionary(): * Given a custom content as a basis for dictionary, and a set of samples, * finalize dictionary by adding headers and statistics. * * Samples must be stored concatenated in a flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order. * * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes. * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
* * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`), * or an error code, which can be tested by ZDICT_isError(). * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0. * Note 2: dictBuffer and dictContent can overlap */ #define ZDICT_CONTENTSIZE_MIN 128 #define ZDICT_DICTSIZE_MIN 256 ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, const void* dictContent, size_t dictContentSize, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, ZDICT_params_t parameters); typedef struct { unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ ZDICT_params_t zParams; } ZDICT_legacy_params_t; /*! ZDICT_trainFromBuffer_legacy(): * Train a dictionary from an array of samples. * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. * The resulting dictionary will be saved into `dictBuffer`. * `parameters` is optional and can be provided with values set to 0 to mean "default". * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) * or an error code, which can be tested with ZDICT_isError(). * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. * It's obviously possible to target smaller or larger ones, just by specifying different `dictBufferCapacity`. * In general, it's recommended to provide a few thousands samples, but this can vary a lot. * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. */ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, ZDICT_legacy_params_t parameters); /* Deprecation warnings */ /* It is generally possible to disable deprecation warnings from compiler, for example with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. 
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ #ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS # define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ #else # define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API # elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) # elif (ZDICT_GCC_VERSION >= 301) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) # elif defined(_MSC_VER) # define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") # define ZDICT_DEPRECATED(message) ZDICTLIB_API # endif #endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); #endif /* ZDICT_STATIC_LINKING_ONLY */ #if defined (__cplusplus) } #endif #endif /* DICTBUILDER_H_001 */ Index: vendor/zstd/dist/lib/legacy/zstd_legacy.h =================================================================== --- vendor/zstd/dist/lib/legacy/zstd_legacy.h (revision 325596) +++ vendor/zstd/dist/lib/legacy/zstd_legacy.h (revision 325597) @@ -1,378 +1,379 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_LEGACY_H #define ZSTD_LEGACY_H #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include "mem.h" /* MEM_STATIC */ #include "error_private.h" /* ERROR */ #include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer */ #if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) # undef ZSTD_LEGACY_SUPPORT # define ZSTD_LEGACY_SUPPORT 8 #endif #if (ZSTD_LEGACY_SUPPORT <= 1) # include "zstd_v01.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 2) # include "zstd_v02.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 3) # include "zstd_v03.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 4) # include "zstd_v04.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 5) # include "zstd_v05.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 6) # include "zstd_v06.h" #endif #if (ZSTD_LEGACY_SUPPORT <= 7) # include "zstd_v07.h" #endif /** ZSTD_isLegacy() : @return : > 0 if supported by legacy decoder. 0 otherwise. return value is the version. 
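A typical caller-side use of this predicate routes a frame to the legacy path only when the magic number demands it. A sketch (assumes the internal zstd_legacy.h is on the include path, which holds inside the library tree):

#include "zstd.h"        /* ZSTD_decompress */
#include "zstd_legacy.h" /* ZSTD_isLegacy, ZSTD_decompressLegacy */

/* Sketch: decompress one frame, old or new format, without a dictionary. */
static size_t decompressAnyVersion(void *dst, size_t dstCapacity,
                                   const void *src, size_t srcSize)
{
    if (ZSTD_isLegacy(src, srcSize))   /* pre-v0.8 magic number? */
        return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0);
    return ZSTD_decompress(dst, dstCapacity, src, srcSize);
}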
*/ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize) { U32 magicNumberLE; if (srcSize<4) return 0; magicNumberLE = MEM_readLE32(src); switch(magicNumberLE) { #if (ZSTD_LEGACY_SUPPORT <= 1) case ZSTDv01_magicNumberLE:return 1; #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case ZSTDv02_magicNumber : return 2; #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case ZSTDv03_magicNumber : return 3; #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case ZSTDv04_magicNumber : return 4; #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case ZSTDv05_MAGICNUMBER : return 5; #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case ZSTDv06_MAGICNUMBER : return 6; #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case ZSTDv07_MAGICNUMBER : return 7; #endif default : return 0; } } MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize) { U32 const version = ZSTD_isLegacy(src, srcSize); if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */ #if (ZSTD_LEGACY_SUPPORT <= 5) if (version==5) { ZSTDv05_parameters fParams; size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.srcSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) if (version==6) { ZSTDv06_frameParams fParams; size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) if (version==7) { ZSTDv07_frameParams fParams; size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize); if (frResult != 0) return 0; return fParams.frameContentSize; } #endif return 0; /* should not be possible */ } MEM_STATIC size_t ZSTD_decompressLegacy( void* dst, size_t dstCapacity, const void* src, size_t compressedSize, const void* dict,size_t dictSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ switch(version) { #if (ZSTD_LEGACY_SUPPORT <= 1) case 1 : return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { size_t result; ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv05_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { size_t result; ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv06_freeDCtx(zd); return result; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { size_t result; ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx(); if (zd==NULL) return ERROR(memory_allocation); result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); ZSTDv07_freeDCtx(zd); return result; } #endif default : return ERROR(prefix_unknown); } } MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t compressedSize) { U32 const version = ZSTD_isLegacy(src, compressedSize); switch(version) { #if 
(ZSTD_LEGACY_SUPPORT <= 1) case 1 : return ZSTDv01_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 2) case 2 : return ZSTDv02_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 3) case 3 : return ZSTDv03_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZSTDv04_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : return ZSTDv05_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : return ZSTDv06_findFrameCompressedSize(src, compressedSize); #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : return ZSTDv07_findFrameCompressedSize(src, compressedSize); #endif default : return ERROR(prefix_unknown); } } MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) { switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext); #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext); #endif } } MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, const void* dict, size_t dictSize) { if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); switch(newVersion) { default : case 1 : case 2 : case 3 : (void)dict; (void)dictSize; return 0; #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv04_decompressInit(dctx); ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext; if (dctx==NULL) return ERROR(memory_allocation); ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize); *legacyContext = dctx; return 0; } #endif } } MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { switch(version) { default : case 1 : case 2 : case 3 : (void)legacyContext; (void)output; (void)input; return ERROR(version_unsupported); #if (ZSTD_LEGACY_SUPPORT <= 4) case 4 : { ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 5) case 5 : { ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 6) case 6 : { ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif #if (ZSTD_LEGACY_SUPPORT <= 7) case 7 : { ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext; const void* src = (const char*)input->src + input->pos; size_t readSize = input->size - input->pos; void* dst = (char*)output->dst + output->pos; size_t decodedSize = output->size - output->pos; size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize); output->pos += decodedSize; input->pos += readSize; return hintSize; } #endif } } #if defined (__cplusplus) } #endif #endif /* ZSTD_LEGACY_H */ Index: vendor/zstd/dist/lib/legacy/zstd_v01.c =================================================================== --- vendor/zstd/dist/lib/legacy/zstd_v01.c (revision 325596) +++ vendor/zstd/dist/lib/legacy/zstd_v01.c (revision 325597) @@ -1,2126 +1,2127 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /****************************************** * Includes ******************************************/ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v01.h" #include "error_private.h" /****************************************** * Static allocation ******************************************/ /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog)) /* FSE_MAX_MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /* FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /**************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION /**************************************************************** * Byte symbol type ****************************************************************/ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ /**************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /**************************************************************** * Includes ****************************************************************/ #include <stdlib.h> /* malloc, free, qsort */ #include <string.h> /* memcpy, memset */ #include <stdio.h> /* printf (debug) */ #ifndef MEM_ACCESS_MODULE #define MEM_ACCESS_MODULE /**************************************************************** * Basic Types *****************************************************************/ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif #endif /* MEM_ACCESS_MODULE */ /**************************************************************** * Memory I/O *****************************************************************/ /* FSE_FORCE_MEMORY_ACCESS * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allows selecting a different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violates the C standard. * It can generate buggy code on targets generating assembly depending on alignment.
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define FSE_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define FSE_FORCE_MEMORY_ACCESS 1 # endif #endif static unsigned FSE_32bits(void) { return sizeof(void*)==4; } static unsigned FSE_isLittleEndian(void) { const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } #elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } #else static U16 FSE_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } static U32 FSE_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } static U64 FSE_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } #endif // FSE_FORCE_MEMORY_ACCESS static U16 FSE_readLE16(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } static U32 FSE_readLE32(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } static U64 FSE_readLE64(const void* memPtr) { if (FSE_isLittleEndian()) return FSE_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } static size_t FSE_readLEST(const void* memPtr) { if (FSE_32bits()) return (size_t)FSE_readLE32(memPtr); else return (size_t)FSE_readLE64(memPtr); } /**************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) #define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) #define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) #define FSE_MIN_TABLELOG 5 #define FSE_TABLELOG_ABSOLUTE_MAX 15 #if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /**************************************************************** * Error Management
****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /**************************************************************** * Complex types ****************************************************************/ typedef struct { int deltaFindState; U32 deltaNbBits; } FSE_symbolCompressionTransform; /* total 8 bytes */ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /**************************************************************** * Internal functions ****************************************************************/ FORCE_INLINE unsigned FSE_highbit32 (register U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ return 31 - __builtin_clz (val); # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /**************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" #endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } #define FSE_DECODE_TYPE FSE_decode_t typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ static size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1; /* because dt is unsigned, 32-bits aligned on 32-bits */ const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge; if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* Init, lay down lowprob symbols */ DTableH[0].tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i<normalizedCounter[s]; i++) { tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; position = (position + step) & tableMask; while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i<tableSize; i++) { FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol); U16 nextState = symbolNext[symbol]++; tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) ); tableDecode[i].newState = (U16)( (nextState << tableDecode[i].nbBits) - tableSize); } } DTableH->
fastMode = (U16)noLarge; return 0; } /****************************** * FSE byte symbol ******************************/ #ifndef FSE_COMMONDEFS_ONLY static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); } static short FSE_abs(short a) { return a<0? -a : a; } /**************************************************************** * Header bitstream management ****************************************************************/ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { const BYTE* const istart = (const BYTE*) headerBuffer; const BYTE* const iend = istart + hbSize; const BYTE* ip = istart; int nbBits; int remaining; int threshold; U32 bitStream; int bitCount; unsigned charnum = 0; int previous0 = 0; if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong; bitStream = FSE_readLE32(ip); nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge; bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<<nbBits)+1; threshold = 1<<nbBits; nbBits++; while ((remaining>1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = FSE_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall; while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = FSE_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = FSE_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC; *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong; return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */
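/* note : a "raw" DTable describes an incompressible stream : each of the (1<<nbBits) cells maps entry s to symbol s with newState==0 and a fixed cost of nbBits, so FSE decoding degenerates into plain nbBits-wide reads (see the fill loop below) */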
if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } /* FSE_initDStream * Initialize a FSE_DStream_t. * srcBuffer must point at the beginning of an FSE block. * The function result is the size of the FSE_block (== srcSize). * If srcSize is too small, the function will return an errorCode; */ static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong; if (srcSize >= sizeof(size_t)) { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = FSE_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ bitD->bitsConsumed = 8 - FSE_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; default:; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ bitD->bitsConsumed = 8 - FSE_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } /*!FSE_lookBits * Provides next n bits from the bitContainer. * bitContainer is not modified (bits are still present for next read/look) * On 32-bits, maxNbBits==25 * On 64-bits, maxNbBits==57 * return : value extracted. */ static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } /*!FSE_readBits * Read next n bits from the bitContainer. * On 32-bits, don't read more than maxNbBits==25 * On 64-bits, don't read more than maxNbBits==57 * Use the fast variant *only* if n >= 1. * return : value extracted. */ static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits) { size_t value = FSE_lookBits(bitD, nbBits); FSE_skipBits(bitD, nbBits); return value; } static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! 
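(with nbBits==0 the masked shift in FSE_lookBitsFast wraps to zero and the whole unshifted bitContainer would be returned)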
*/ { size_t value = FSE_lookBitsFast(bitD, nbBits); FSE_skipBits(bitD, nbBits); return value; } static unsigned FSE_reloadDStream(FSE_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return FSE_DStream_tooFar; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = FSE_readLEST(bitD->ptr); return FSE_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer; return FSE_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; U32 result = FSE_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = FSE_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt) { const void* ptr = dt; const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog); FSE_reloadDStream(bitD); DStatePtr->table = dt + 1; } static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = FSE_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = FSE_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } /* FSE_endOfDStream Tells if bitD has reached end of bitStream or not */ static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD) { return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8)); } static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; FSE_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4) { op[0] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ FSE_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ FSE_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */ while (1) { if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? */ if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */ return (size_t)-FSE_ERROR_corruptionDetected; } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */ /* select fast mode (static) */ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } /* ******************************************************* * Huff0 : Huffman block compression *********************************************************/ #define HUF_MAX_SYMBOL_VALUE 255 #define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */ #define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */ #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !"
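/* at the tableLog of 12 used here, a Huffman DTable costs 1 + (1<<12) cells of 2 bytes each, i.e. a bit over 8 KB per HUF_decompress() call */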
#endif typedef struct HUF_CElt_s { U16 val; BYTE nbBits; } HUF_CElt ; typedef struct nodeElt_s { U32 count; U16 parent; BYTE byte; BYTE nbBits; } nodeElt; /* ******************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DElt; static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 weightTotal; U32 maxBits; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; U32 nextRankStart; void* ptr = DTable+1; HUF_DElt* const dt = (HUF_DElt*)ptr; if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; iSize = ip[0]; FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... */ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, sizeof(huffWeight)); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; ip += 1; for (n=0; n<oSize; n+=2) { huffWeight[n] = ip[n/2] >> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankVal, 0, sizeof(rankVal)); weightTotal = 0; for (n=0; n<oSize; n++) { if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected; rankVal[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected; /* get last non-null symbol weight (implied, total must be 2^n) */ maxBits = FSE_highbit32(weightTotal) + 1; if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */ DTable[0] = (U16)maxBits; { U32 total = 1 << maxBits; U32 rest = total - weightTotal; U32 verif = 1 << FSE_highbit32(rest); U32 lastWeight = FSE_highbit32(rest) + 1; if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankVal[lastWeight]++; } /* check tree construction validity */ if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=maxBits; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n<=oSize; n++) { const U32 w = huffWeight[n]; const U32 length = (1 << w) >> 1; U32 i; HUF_DElt D; D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize+1; } static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog) { const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; FSE_skipBits(Dstream, dt[val].nbBits); return c; } static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ void* dst,
size_t maxDstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-15; const void* ptr = DTable; const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; const U32 dtLog = DTable[0]; size_t errorCode; U32 reloadStatus; /* Init */ const U16* jumpTable = (const U16*)cSrc; const size_t length1 = FSE_readLE16(jumpTable); const size_t length2 = FSE_readLE16(jumpTable+1); const size_t length3 = FSE_readLE16(jumpTable+2); const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; // check coherency !! const char* const start1 = (const char*)(cSrc) + 6; const char* const start2 = start1 + length1; const char* const start3 = start2 + length2; const char* const start4 = start3 + length3; FSE_DStream_t bitD1, bitD2, bitD3, bitD4; if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; errorCode = FSE_initDStream(&bitD1, start1, length1); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD2, start2, length2); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD3, start3, length3); if (FSE_isError(errorCode)) return errorCode; errorCode = FSE_initDStream(&bitD4, start4, length4); if (FSE_isError(errorCode)) return errorCode; reloadStatus=FSE_reloadDStream(&bitD2); /* 16 symbols per loop */ for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit); /* D2-4 checked by D1 */ op+=16, reloadStatus = FSE_reloadDStream(&bitD1) ) { #define HUF_DECODE_SYMBOL_0(n, Dstream) \ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); #define HUF_DECODE_SYMBOL_1(n, Dstream) \ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream) #define HUF_DECODE_SYMBOL_2(n, Dstream) \ op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ if (FSE_32bits()) FSE_reloadDStream(&Dstream) HUF_DECODE_SYMBOL_1( 0, bitD1); HUF_DECODE_SYMBOL_1( 1, bitD2); HUF_DECODE_SYMBOL_1( 2, bitD3); HUF_DECODE_SYMBOL_1( 3, bitD4); HUF_DECODE_SYMBOL_2( 4, bitD1); HUF_DECODE_SYMBOL_2( 5, bitD2); HUF_DECODE_SYMBOL_2( 6, bitD3); HUF_DECODE_SYMBOL_2( 7, bitD4); HUF_DECODE_SYMBOL_1( 8, bitD1); HUF_DECODE_SYMBOL_1( 9, bitD2); HUF_DECODE_SYMBOL_1(10, bitD3); HUF_DECODE_SYMBOL_1(11, bitD4); HUF_DECODE_SYMBOL_0(12, bitD1); HUF_DECODE_SYMBOL_0(13, bitD2); HUF_DECODE_SYMBOL_0(14, bitD3); HUF_DECODE_SYMBOL_0(15, bitD4); } if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */ return (size_t)-FSE_ERROR_corruptionDetected; /* tail */ { // bitTail = bitD1; // *much* slower : -20% !??! FSE_DStream_t bitTail; bitTail.ptr = bitD1.ptr; bitTail.bitsConsumed = bitD1.bitsConsumed; bitTail.bitContainer = bitD1.bitContainer; // required in case of FSE_DStream_endOfBuffer bitTail.start = start1; for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++) { HUF_DECODE_SYMBOL_0(0, bitTail) } if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; return op-ostart; } } static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUF_readDTable (DTable, cSrc, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; ip += errorCode; cSrcSize -= errorCode; return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable); } #endif /* FSE_COMMONDEFS_ONLY */ /* zstd - standard compression library Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - zstd public forum : https://groups.google.com/forum/#!forum/lz4c */ /**************************************************************** * Tuning parameters *****************************************************************/ /* MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect */ #define ZSTD_MEMORY_USAGE 17 /************************************** CPU Feature Detection **************************************/ /* * Automated efficient unaligned memory access detection * Based on known hardware architectures * This list will be updated thanks to feedbacks */ #if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \ || defined(__ARM_FEATURE_UNALIGNED) \ || defined(__i386__) || defined(__x86_64__) \ || defined(_M_IX86) || defined(_M_X64) \ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \ || (defined(_M_ARM) && (_M_ARM >= 7)) # define ZSTD_UNALIGNED_ACCESS 1 #else # define ZSTD_UNALIGNED_ACCESS 0 #endif /******************************************************** * Includes *********************************************************/ #include <stdlib.h> /* calloc */ #include <string.h> /* memcpy, memmove */ #include <stdio.h> /* debug : printf */ /******************************************************** * Compiler specifics *********************************************************/ #ifdef __AVX2__ # include <immintrin.h> /* AVX2 intrinsics */ #endif #ifdef _MSC_VER /* Visual Studio */ # include <intrin.h> /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4324) /* disable: C4324: padded structure */ #endif #ifndef MEM_ACCESS_MODULE #define MEM_ACCESS_MODULE /******************************************************** * Basic Types *********************************************************/ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # include <stdint.h> typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; #endif #endif /* MEM_ACCESS_MODULE */ /******************************************************** * Constants *********************************************************/ static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */ #define HASH_LOG (ZSTD_MEMORY_USAGE - 2) #define HASH_TABLESIZE (1 << HASH_LOG) #define HASH_MASK (HASH_TABLESIZE - 1) #define KNUTH 2654435761 #define BIT7
128 #define BIT6 64 #define BIT5 32 #define BIT4 16 #define KB *(1 <<10) #define MB *(1 <<20) #define GB *(1U<<30) #define BLOCKSIZE (128 KB) /* define, for static allocation */ #define WORKPLACESIZE (BLOCKSIZE*3) #define MINMATCH 4 #define MLbits 7 #define LLbits 6 #define Offbits 5 #define MaxML ((1<<MLbits) - 1) #define MaxLL ((1<<LLbits) - 1) #define MaxOff ((1<<Offbits)- 1) #define LitFSELog 11 #define MLFSELog 10 #define LLFSELog 10 #define OffFSELog 9 #define MAX(a,b) ((a)<(b)?(b):(a)) #define MaxSeq MAX(MaxLL, MaxML) #define LITERAL_NOENTROPY 63 #define COMMAND_NOENTROPY 7 /* to remove */ static const size_t ZSTD_blockHeaderSize = 3; static const size_t ZSTD_frameHeaderSize = 4; /******************************************************** * Memory operations *********************************************************/ static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; } static unsigned ZSTD_isLittleEndian(void) { const union { U32 i; BYTE c[4]; } one = { 1 }; return one.c[0]; } static U16 ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; } static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length) { const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; while (op < oend) COPY8(op, ip); } static U16 ZSTD_readLE16(const void* memPtr) { if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)((U16)p[0] + ((U16)p[1]<<8)); } } static U32 ZSTD_readLE24(const void* memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); } static U32 ZSTD_readBE32(const void* memPtr) { const BYTE* p = (const BYTE*)memPtr; return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0)); } /************************************** * Local structures **************************************/ typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; typedef struct { blockType_t blockType; U32 origSize; } blockProperties_t; typedef struct { void* buffer; U32* offsetStart; U32* offset; BYTE* offCodeStart; BYTE* offCode; BYTE* litStart; BYTE* lit; BYTE* litLengthStart; BYTE* litLength; BYTE* matchLengthStart; BYTE* matchLength; BYTE* dumpsStart; BYTE* dumps; } seqStore_t; typedef struct ZSTD_Cctx_s { const BYTE* base; U32 current; U32 nextUpdate; seqStore_t seqStore; #ifdef __AVX2__ __m256i hashTable[HASH_TABLESIZE>>3]; #else U32 hashTable[HASH_TABLESIZE]; #endif BYTE buffer[WORKPLACESIZE]; } cctxi_t; /************************************** * Error Management **************************************/ /* published entry point */ unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); } /************************************** * Tool functions **************************************/ #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ #define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */ #define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /************************************************************** * Decompression code **************************************************************/ size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { const BYTE* const in = (const BYTE* const)src; BYTE headerFlags; U32 cSize; if (srcSize < 3) return ERROR(srcSize_wrong); headerFlags = *in; cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); bpPtr->blockType = (blockType_t)(headerFlags >> 6); bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; if (bpPtr->blockType == bt_end) return 0; if (bpPtr->blockType == bt_rle) return 1; return cSize; } static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); memcpy(dst, src, srcSize); return srcSize; } static size_t ZSTD_decompressLiterals(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { BYTE* op = (BYTE*)dst; BYTE* const oend = op + maxDstSize; const BYTE* ip = (const BYTE*)src; size_t errorCode; size_t litSize; /* check : minimum 2, for litSize, +1, for content */ if (srcSize <= 3) return ERROR(corruption_detected); litSize = ip[1] + (ip[0]<<8); litSize += ((ip[-3] >> 3) & 7) << 16; // mmmmh....
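/* note : litSize bits 0-15 come from the 2-byte literal sub-block header just read, while bits 16-18 live in the first byte of the enclosing block header, 3 bytes back (hence ip[-3]). Illustrative example : a block header starting with byte 0x08 in front of a literal header { 0x00, 0x10 } gives litSize = 0x10 + (1<<16). */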
op = oend - litSize; (void)ctx; if (litSize > maxDstSize) return ERROR(dstSize_tooSmall); errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2); if (FSE_isError(errorCode)) return ERROR(GENERIC); return litSize; } size_t ZSTDv01_decodeLiteralsBlock(void* ctx, void* dst, size_t maxDstSize, const BYTE** litStart, size_t* litSize, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; BYTE* const ostart = (BYTE* const)dst; BYTE* const oend = ostart + maxDstSize; blockProperties_t litbp; size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp); if (ZSTDv01_isError(litcSize)) return litcSize; if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); ip += ZSTD_blockHeaderSize; switch(litbp.blockType) { case bt_raw: *litStart = ip; ip += litcSize; *litSize = litcSize; break; case bt_rle: { size_t rleSize = litbp.origSize; if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); if (!srcSize) return ERROR(srcSize_wrong); memset(oend - rleSize, *ip, rleSize); *litStart = oend - rleSize; *litSize = rleSize; ip++; break; } case bt_compressed: { size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize); if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize; *litStart = oend - decodedLitSize; *litSize = decodedLitSize; ip += litcSize; break; } case bt_end: default: return ERROR(GENERIC); } return ip-istart; } size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, const void* src, size_t srcSize) { const BYTE* const istart = (const BYTE* const)src; const BYTE* ip = istart; const BYTE* const iend = istart + srcSize; U32 LLtype, Offtype, MLtype; U32 LLlog, Offlog, MLlog; size_t dumpsLength; /* check */ if (srcSize < 5) return ERROR(srcSize_wrong); /* SeqHead */ *nbSeq = ZSTD_readLE16(ip); ip+=2; LLtype = *ip >> 6; Offtype = (*ip >> 4) & 3; MLtype = (*ip >> 2) & 3; if (*ip & 2) { dumpsLength = ip[2]; dumpsLength += ip[1] << 8; ip += 3; } else { dumpsLength = ip[1]; dumpsLength += (ip[0] & 1) << 8; ip += 2; } *dumpsPtr = ip; ip += dumpsLength; *dumpsLengthPtr = dumpsLength; /* check */ if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ /* sequences */ { S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ size_t headerSize; /* Build DTables */ switch(LLtype) { case bt_rle : LLlog = 0; FSE_buildDTable_rle(DTableLL, *ip++); break; case bt_raw : LLlog = LLbits; FSE_buildDTable_raw(DTableLL, LLbits); break; default : { U32 max = MaxLL; headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (LLlog > LLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableLL, norm, max, LLlog); } } switch(Offtype) { case bt_rle : Offlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableOffb, *ip++); break; case bt_raw : Offlog = Offbits; FSE_buildDTable_raw(DTableOffb, Offbits); break; default : { U32 max = MaxOff; headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (Offlog > OffFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableOffb, norm, max, Offlog); } } switch(MLtype) { case bt_rle : MLlog = 0; if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", 
hence no header, but at least xxLog bits */ FSE_buildDTable_rle(DTableML, *ip++); break; case bt_raw : MLlog = MLbits; FSE_buildDTable_raw(DTableML, MLbits); break; default : { U32 max = MaxML; headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); if (FSE_isError(headerSize)) return ERROR(GENERIC); if (MLlog > MLFSELog) return ERROR(corruption_detected); ip += headerSize; FSE_buildDTable(DTableML, norm, max, MLlog); } } } return ip-istart; } typedef struct { size_t litLength; size_t offset; size_t matchLength; } seq_t; typedef struct { FSE_DStream_t DStream; FSE_DState_t stateLL; FSE_DState_t stateOffb; FSE_DState_t stateML; size_t prevOffset; const BYTE* dumps; const BYTE* dumpsEnd; } seqState_t; static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) { size_t litLength; size_t prevOffset; size_t offset; size_t matchLength; const BYTE* dumps = seqState->dumps; const BYTE* const de = seqState->dumpsEnd; /* Literal length */ litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); prevOffset = litLength ? seq->offset : seqState->prevOffset; seqState->prevOffset = seq->offset; if (litLength == MaxLL) { U32 add = dumps<de ? *dumps++ : 0; if (add < 255) litLength += add; else { if (dumps<=(de-3)) { litLength = ZSTD_readLE24(dumps); /* no pb : dumps is always followed by seq tables > 1 byte */ dumps += 3; } } } /* Offset */ { U32 offsetCode, nbBits; offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); nbBits = offsetCode - 1; if (offsetCode==0) nbBits = 0; /* cmove */ offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits); if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); if (offsetCode==0) offset = prevOffset; } /* MatchLength */ matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); if (matchLength == MaxML) { U32 add = dumps<de ? *dumps++ : 0; if (add < 255) matchLength += add; else { if (dumps<=(de-3)) { matchLength = ZSTD_readLE24(dumps); /* no pb : dumps is always followed by seq tables > 1 byte */ dumps += 3; } } } matchLength += MINMATCH; /* save result */ seq->litLength = litLength; seq->offset = offset; seq->matchLength = matchLength; seqState->dumps = dumps; } static size_t ZSTD_execSequence(BYTE* op, seq_t sequence, const BYTE** litPtr, const BYTE* const litLimit, BYTE* const base, BYTE* const oend) { static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ const BYTE* const ostart = op; const size_t litLength = sequence.litLength; BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ const BYTE* const litEnd = *litPtr + litLength; /* check */ if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ if (litEnd > litLimit) return ERROR(corruption_detected); if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ /* copy Literals */ if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) memmove(op, *litPtr, litLength); /* overwrite risk */ else ZSTD_wildcopy(op, *litPtr, litLength); op += litLength; *litPtr = litEnd; /* update for next sequence */ /* check : last match must be at a minimum distance of 8 from end of dest buffer */ if (oend-op < 8) return ERROR(dstSize_tooSmall); /* copy Match */ { const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12); const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ?
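(the two checks just below, match < base and sequence.offset > (size_t)base, reject such corrupted frames before any copy)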
*/ size_t qutt = 12; U64 saved[2]; /* check */ if (match < base) return ERROR(corruption_detected); if (sequence.offset > (size_t)base) return ERROR(corruption_detected); /* save beginning of literal sequence, in case of write overlap */ if (overlapRisk) { if ((endMatch + qutt) > oend) qutt = oend-endMatch; memcpy(saved, endMatch, qutt); } if (sequence.offset < 8) { const int dec64 = dec64table[sequence.offset]; op[0] = match[0]; op[1] = match[1]; op[2] = match[2]; op[3] = match[3]; match += dec32table[sequence.offset]; ZSTD_copy4(op+4, match); match -= dec64; } else { ZSTD_copy8(op, match); } op += 8; match += 8; if (endMatch > oend-(16-MINMATCH)) { if (op < oend-8) { ZSTD_wildcopy(op, match, (oend-8) - op); match += (oend-8) - op; op = oend-8; } while (op<endMatch) *op++ = *match++; } else ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ /* restore, in case of overlap */ if (overlapRisk) memcpy(endMatch, saved, qutt); } return endMatch-ostart; } typedef struct ZSTDv01_Dctx_s { U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; void* previousDstEnd; void* base; size_t expected; blockType_t bType; U32 phase; } dctx_t; static size_t ZSTD_decompressSequences( void* ctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, const BYTE* litStart, size_t litSize) { dctx_t* dctx = (dctx_t*)ctx; const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t errorCode, dumpsLength; const BYTE* litPtr = litStart; const BYTE* const litEnd = litStart + litSize; int nbSeq; const BYTE* dumps; U32* DTableLL = dctx->LLTable; U32* DTableML = dctx->MLTable; U32* DTableOffb = dctx->OffTable; BYTE* const base = (BYTE*) (dctx->base); /* Build Decoding Tables */ errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, DTableLL, DTableML, DTableOffb, ip, iend-ip); if (ZSTDv01_isError(errorCode)) return errorCode; ip += errorCode; /* Regen sequences */ { seq_t sequence; seqState_t seqState; memset(&sequence, 0, sizeof(sequence)); seqState.dumps = dumps; seqState.dumpsEnd = dumps + dumpsLength; seqState.prevOffset = 1; errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip); if (FSE_isError(errorCode)) return ERROR(corruption_detected); FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; ) { size_t oneSeqSize; nbSeq--; ZSTD_decodeSequence(&sequence, &seqState); oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; } /* check if reached exact end */ if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ /* last literal segment */ { size_t lastLLSize = litEnd - litPtr; if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); if (op != litPtr) memmove(op, litPtr, lastLLSize); op += lastLLSize; } } return op-ostart; } static size_t ZSTD_decompressBlock( void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { /* blockType == blockCompressed, srcSize is trusted */ const BYTE* ip = (const BYTE*)src; const BYTE* litPtr = NULL; size_t litSize = 0; size_t errorCode; /* Decode literals sub-block */ errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize); if (ZSTDv01_isError(errorCode)) return errorCode; ip += errorCode; srcSize -= errorCode; return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize); } size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; const BYTE* iend = ip + srcSize; BYTE* const ostart = (BYTE* const)dst; BYTE* op = ostart; BYTE* const oend = ostart + maxDstSize; size_t remainingSize = srcSize; U32 magicNumber; size_t errorCode=0; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) return
ERROR(prefix_unknown); ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties); if (ZSTDv01_isError(blockSize)) return blockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (blockSize > remainingSize) return ERROR(srcSize_wrong); switch(blockProperties.blockType) { case bt_compressed: errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize); break; case bt_raw : errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize); break; case bt_rle : return ERROR(GENERIC); /* not yet supported */ break; case bt_end : /* end of frame */ if (remainingSize) return ERROR(srcSize_wrong); break; default: return ERROR(GENERIC); } if (blockSize == 0) break; /* bt_end */ if (ZSTDv01_isError(errorCode)) return errorCode; op += errorCode; ip += blockSize; remainingSize -= blockSize; } return op-ostart; } size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { dctx_t ctx; ctx.base = dst; return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); } size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t srcSize) { const BYTE* ip = (const BYTE*)src; size_t remainingSize = srcSize; U32 magicNumber; blockProperties_t blockProperties; /* Frame Header */ if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; /* Loop on each block */ while (1) { size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTDv01_isError(blockSize)) return blockSize; ip += ZSTD_blockHeaderSize; remainingSize -= ZSTD_blockHeaderSize; if (blockSize > remainingSize) return ERROR(srcSize_wrong); if (blockSize == 0) break; /* bt_end */ ip += blockSize; remainingSize -= blockSize; } return ip - (const BYTE*)src; } /******************************* * Streaming Decompression API *******************************/ size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx) { dctx->expected = ZSTD_frameHeaderSize; dctx->phase = 0; dctx->previousDstEnd = NULL; dctx->base = NULL; return 0; } ZSTDv01_Dctx* ZSTDv01_createDCtx(void) { ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx)); if (dctx==NULL) return NULL; ZSTDv01_resetDCtx(dctx); return dctx; } size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx) { free(dctx); return 0; } size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx) { return ((dctx_t*)dctx)->expected; } size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) { dctx_t* ctx = (dctx_t*)dctx; /* Sanity check */ if (srcSize != ctx->expected) return ERROR(srcSize_wrong); if (dst != ctx->previousDstEnd) /* not contiguous */ ctx->base = dst; /* Decompress : frame header */ if (ctx->phase == 0) { /* Check frame magic header */ U32 magicNumber = ZSTD_readBE32(src); if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; return 0; } /* Decompress : block header */ if (ctx->phase == 1) { blockProperties_t bp; size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTDv01_isError(blockSize)) return blockSize; if (bp.blockType == bt_end) { ctx->expected = 0; ctx->phase = 0; } else { ctx->expected = blockSize; ctx->bType = bp.blockType; ctx->phase = 2; } return 0; } /* Decompress 
: block content */ { size_t rSize; switch(ctx->bType) { case bt_compressed: rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); break; case bt_raw : rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); break; case bt_rle : return ERROR(GENERIC); /* not yet handled */ break; case bt_end : /* should never happen (filtered at phase 1) */ rSize = 0; break; default: return ERROR(GENERIC); } ctx->phase = 1; ctx->expected = ZSTD_blockHeaderSize; ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); return rSize; } } Index: vendor/zstd/dist/lib/legacy/zstd_v01.h =================================================================== --- vendor/zstd/dist/lib/legacy/zstd_v01.h (revision 325596) +++ vendor/zstd/dist/lib/legacy/zstd_v01.h (revision 325597) @@ -1,88 +1,89 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_V01_H_28739879432 #define ZSTD_V01_H_28739879432 #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include <stddef.h> /* size_t */ /* ************************************* * Simple one-step function ***************************************/ /** ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format compressedSize : is the exact source size maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. It must be equal to or larger than originalSize, otherwise decompression will fail. return : the number of bytes decompressed into destination buffer (originalSize) or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /** ZSTDv01_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.1.x format compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' return : the number of bytes that would be read to decompress this frame or an errorCode if it fails (which can be tested using ZSTDv01_isError()) */ size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize); /** ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error */ unsigned ZSTDv01_isError(size_t code); /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx; ZSTDv01_Dctx* ZSTDv01_createDCtx(void); size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx); size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxOriginalSize, const void* src, size_t compressedSize); /* ************************************* * Streaming functions ***************************************/ size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx); size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx); size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); /** Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve decompression if they are located prior to current block.
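A typical pull loop can be sketched as follows (illustrative only : the readFromSource() helper and buffer names are hypothetical, and error handling is elided) : size_t toRead = ZSTDv01_nextSrcSizeToDecompress(dctx); while (toRead) { readFromSource(srcBuffer, toRead); size_t const regenerated = ZSTDv01_decompressContinue(dctx, dstPtr, dstCapacity, srcBuffer, toRead); if (ZSTDv01_isError(regenerated)) break; dstPtr = (char*)dstPtr + regenerated; dstCapacity -= regenerated; toRead = ZSTDv01_nextSrcSizeToDecompress(dctx); }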
Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */ #define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */ #if defined (__cplusplus) } #endif #endif /* ZSTD_V01_H_28739879432 */ Index: vendor/zstd/dist/lib/legacy/zstd_v02.c =================================================================== --- vendor/zstd/dist/lib/legacy/zstd_v02.c (revision 325596) +++ vendor/zstd/dist/lib/legacy/zstd_v02.c (revision 325597) @@ -1,3555 +1,3556 @@ /* * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #include <stddef.h> /* size_t, ptrdiff_t */ #include "zstd_v02.h" #include "error_private.h" /****************************************** * Compiler-specific ******************************************/ #if defined(_MSC_VER) /* Visual Studio */ # include <stdlib.h> /* _byteswap_ulong */ # include <intrin.h> /* _byteswap_* */ #endif /* ****************************************************************** mem.h low-level memory access routines Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef MEM_H_MODULE #define MEM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Includes ******************************************/ #include /* size_t, ptrdiff_t */ #include /* memcpy */ /****************************************** * Compiler-specific ******************************************/ #if defined(__GNUC__) # define MEM_STATIC static __attribute__((unused)) #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define MEM_STATIC static inline #elif defined(_MSC_VER) # define MEM_STATIC static __inline #else # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /**************************************************************** * Basic Types *****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # include typedef uint8_t BYTE; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; #else typedef unsigned char BYTE; typedef unsigned short U16; typedef signed short S16; typedef unsigned int U32; typedef signed int S32; typedef unsigned long long U64; typedef signed long long S64; #endif /**************************************************************** * Memory I/O *****************************************************************/ /* MEM_FORCE_MEMORY_ACCESS * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets generating assembly depending on alignment. * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
* Prefer these methods in priority order (0 > 1 > 2) */ #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define MEM_FORCE_MEMORY_ACCESS 2 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define MEM_FORCE_MEMORY_ACCESS 1 # endif #endif MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) /* violates C standard on structure alignment. Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } #else /* default method, safe and standard. 
can sometimes prove slower */ MEM_STATIC U16 MEM_read16(const void* memPtr) { U16 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U32 MEM_read32(const void* memPtr) { U32 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC U64 MEM_read64(const void* memPtr) { U64 val; memcpy(&val, memPtr, sizeof(val)); return val; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { memcpy(memPtr, &value, sizeof(value)); } #endif // MEM_FORCE_MEMORY_ACCESS MEM_STATIC U16 MEM_readLE16(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read16(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U16)(p[0] + (p[1]<<8)); } } MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) { if (MEM_isLittleEndian()) { MEM_write16(memPtr, val); } else { BYTE* p = (BYTE*)memPtr; p[0] = (BYTE)val; p[1] = (BYTE)(val>>8); } } MEM_STATIC U32 MEM_readLE32(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read32(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); } } MEM_STATIC U64 MEM_readLE64(const void* memPtr) { if (MEM_isLittleEndian()) return MEM_read64(memPtr); else { const BYTE* p = (const BYTE*)memPtr; return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); } } MEM_STATIC size_t MEM_readLEST(const void* memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } #if defined (__cplusplus) } #endif #endif /* MEM_H_MODULE */ /* ****************************************************************** bitstream Part of NewGen Entropy library header file (to include) Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef BITSTREAM_H_MODULE #define BITSTREAM_H_MODULE #if defined (__cplusplus) extern "C" { #endif /* * This API consists of small unitary functions, which highly benefit from being inlined. 
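/*
 * Illustrative sketch -- NOT part of the vendored sources. It contrasts the three
 * unaligned-access strategies documented in the MEM_FORCE_MEMORY_ACCESS comment
 * above; the demo_* names are hypothetical. Method 0 relies on the compiler folding
 * memcpy() into a single load on targets where that is legal.
 */
#include <string.h>   /* memcpy */
static unsigned demo_read32_method0(const void* p)   /* method 0 : safe and portable */
{
    unsigned v; memcpy(&v, p, sizeof(v)); return v;
}
#if defined(__GNUC__)
typedef struct { unsigned u32; } __attribute__((packed)) demo_unalign32;
static unsigned demo_read32_method1(const void* p)   /* method 1 : compiler extension */
{
    return ((const demo_unalign32*)p)->u32;
}
#endif
static unsigned demo_read32_method2(const void* p)   /* method 2 : UB if p is misaligned */
{
    return *(const unsigned*)p;
}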
* Since link-time-optimization is not available for all compilers, * these functions are defined into a .h to be included. */ /********************************************** * bitStream decompression API (read backward) **********************************************/ typedef struct { size_t bitContainer; unsigned bitsConsumed; const char* ptr; const char* start; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, BIT_DStream_endOfBuffer = 1, BIT_DStream_completed = 2, BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); /* * Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. * Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is manually filled from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(size_t))-7) bits when its result is BIT_DStream_unfinished. * Otherwise, it can be less than that, so proceed accordingly. * Checking if DStream has reached its end can be performed with BIT_endOfDStream() */ /****************************************** * unsafe API ******************************************/ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ /**************************************************************** * Helper functions ****************************************************************/ MEM_STATIC unsigned BIT_highbit32 (register U32 val) { # if defined(_MSC_VER) /* Visual */ unsigned long r=0; _BitScanReverse ( &r, val ); return (unsigned) r; # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return 31 - __builtin_clz (val); # else /* Software version */ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; U32 v = val; unsigned r; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; return r; # endif } /********************************************************** * bitStream decoding **********************************************************/ /*!BIT_initDStream * Initialize a BIT_DStream_t. 
* @bitD : a pointer to an already allocated BIT_DStream_t structure * @srcBuffer must point at the beginning of a bitStream * @srcSize must be the exact size of the bitStream * @result : size of stream (== srcSize) or an errorCode if a problem is detected */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } if (srcSize >= sizeof(size_t)) /* normal case */ { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); bitD->bitContainer = MEM_readLEST(bitD->ptr); contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); } else { U32 contain32; bitD->start = (const char*)srcBuffer; bitD->ptr = bitD->start; bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; default:; } contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ bitD->bitsConsumed = 8 - BIT_highbit32(contain32); bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; } return srcSize; } /*!BIT_lookBits * Provides next n bits from local register * local register is not modified (bits are still present for next read/look) * On 32-bits, maxNbBits==25 * On 64-bits, maxNbBits==57 * @return : value extracted */ MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); } /*! BIT_lookBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) { const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); } MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; } /*!BIT_readBits * Read next n bits from local register. * pay attention to not read more than nbBits contained into local register. * @return : extracted value. 
*/ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } /*!BIT_readBitsFast : * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t value = BIT_lookBitsFast(bitD, nbBits); BIT_skipBits(bitD, nbBits); return value; } MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ return BIT_DStream_overflow; if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { bitD->ptr -= bitD->bitsConsumed >> 3; bitD->bitsConsumed &= 7; bitD->bitContainer = MEM_readLEST(bitD->ptr); return BIT_DStream_unfinished; } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ result = BIT_DStream_endOfBuffer; } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ return result; } } /*! BIT_endOfDStream * @return Tells if DStream has reached its exact end */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); } #if defined (__cplusplus) } #endif #endif /* BITSTREAM_H_MODULE */ /* ****************************************************************** Error codes and messages Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
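/*
 * Illustrative sketch -- NOT part of the vendored sources. A minimal consumer of the
 * backward bitstream defined above: init once, read fields in the reverse order the
 * encoder wrote them, reload between batches, and confirm exact consumption at the
 * end. The field widths (3 and 5) are arbitrary picks for the demonstration; a real
 * consumer must mirror the encoder's layout.
 */
static size_t demo_drainBitstream(const void* src, size_t srcSize)
{
    BIT_DStream_t bitD;
    size_t sum = 0;
    if (ERR_isError(BIT_initDStream(&bitD, src, srcSize))) return 0;
    do {
        sum += BIT_readBits(&bitD, 3);   /* the last field written comes out first */
        sum += BIT_readBits(&bitD, 5);
    } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
    return BIT_endOfDStream(&bitD) ? sum : 0;   /* 0 : stream not consumed exactly */
}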
You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef ERROR_H_MODULE #define ERROR_H_MODULE #if defined (__cplusplus) extern "C" { #endif /****************************************** * Compiler-specific ******************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define ERR_STATIC static inline #elif defined(_MSC_VER) # define ERR_STATIC static __inline #elif defined(__GNUC__) # define ERR_STATIC static __attribute__((unused)) #else # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ #endif /****************************************** * Error Management ******************************************/ #define PREFIX(name) ZSTD_error_##name #define ERROR(name) (size_t)-PREFIX(name) #define ERROR_LIST(ITEM) \ ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ ITEM(PREFIX(maxCode)) #define ERROR_GENERATE_ENUM(ENUM) ENUM, typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ #define ERROR_CONVERTTOSTRING(STRING) #STRING, #define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC const char* ERR_getErrorName(size_t code) { static const char* codeError = "Unspecified error code"; if (ERR_isError(code)) return ERR_strings[-(int)(code)]; return codeError; } #if defined (__cplusplus) } #endif #endif /* ERROR_H_MODULE */ /* Constructor and Destructor of type FSE_CTable Note that its size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ /* ****************************************************************** FSE : Finite State Entropy coder header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
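/*
 * Illustrative sketch -- NOT part of the vendored sources. ERROR(name) negates the
 * enum value and casts it into size_t, so every error code lands in the topmost few
 * values of the size_t range; ERR_isError() is then a single comparison against
 * ERROR(maxCode), and one unsigned return carries either a byte count or an error.
 */
#include <stdio.h>   /* printf */
static void demo_reportResult(size_t code)
{
    if (ERR_isError(code))
        printf("failed : %s\n", ERR_getErrorName(code));
    else
        printf("ok : %u bytes\n", (unsigned)code);   /* demo only; may truncate size_t */
}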
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation ******************************************/ /* FSE buffer bounds */ #define FSE_NCOUNTBOUND 512 #define FSE_BLOCKBOUND(size) (size + (size>>7)) #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= BIT_DStream_completed When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. Checking if DStream has reached its end is performed by : BIT_endOfDStream(&DStream); Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. FSE_endOfDState(&DState); */ /****************************************** * FSE unsafe API ******************************************/ static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); /* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ /****************************************** * Implementation of inline functions ******************************************/ /* decompression */ typedef struct { U16 tableLog; U16 fastMode; } FSE_DTableHeader; /* sizeof U32 */ typedef struct { unsigned short newState; unsigned char symbol; unsigned char nbBits; } FSE_decode_t; /* size == U32 */ MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); BIT_reloadDStream(bitD); DStatePtr->table = dt + 1; } MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBits(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) { const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; const U32 nbBits = DInfo.nbBits; BYTE symbol = DInfo.symbol; size_t lowBits = BIT_readBitsFast(bitD, nbBits); DStatePtr->state = DInfo.newState + lowBits; return symbol; } MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) { return DStatePtr->state == 0; } #if defined (__cplusplus) } #endif /* ****************************************************************** Huff0 : Huffman 
coder, part of New Generation Entropy library header file for static linking (only) Copyright (C) 2013-2015, Yann Collet BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - Source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #if defined (__cplusplus) extern "C" { #endif /****************************************** * Static allocation macros ******************************************/ /* Huff0 buffer bounds */ #define HUF_CTABLEBOUND 129 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of Huff0's DTable */ #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog } #define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog } #if defined (__cplusplus) } #endif /* zstd - standard compression library Header File Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) */ #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Includes ***************************************/ #include <stddef.h> /* size_t */ /* ************************************* * Version ***************************************/ #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ #define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ #define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /* ************************************* * Advanced functions ***************************************/ typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ #if defined (__cplusplus) } #endif /* zstd - standard compression library Header File for static linking only Copyright (C) 2014-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
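/*
 * Illustrative sketch -- NOT part of the vendored sources. The HUF_CREATE_STATIC_*
 * macros above reserve (1 + 2^maxTableLog) cells and seed cell 0 with maxTableLog;
 * the HUF_readDTableX* functions defined further down in this file check the decoded
 * tableLog against that cell before overwriting it with the actual value.
 */
static void demo_staticDTable(void)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
    unsigned const capacityLog = DTable[0];   /* == HUF_MAX_TABLELOG before any read */
    (void)capacityLog;
}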
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - zstd source repository : https://github.com/Cyan4973/zstd - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c */ /* The objects defined into this file should be considered experimental. * They are not labelled stable, as their prototype may change in the future. * You can use them for tests, provide feedback, or if you can endure risk of future changes. */ #if defined (__cplusplus) extern "C" { #endif /* ************************************* * Streaming functions ***************************************/ typedef struct ZSTD_DCtx_s ZSTD_DCtx; /* Use above functions alternatively. ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. Result is the number of bytes regenerated within 'dst'. It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. */ /* ************************************* * Prefix - version detection ***************************************/ #define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/ #if defined (__cplusplus) } #endif /* ****************************************************************** FSE : Finite State Entropy coder Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
You can contact the author at : - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ #ifndef FSE_COMMONDEFS_ONLY /**************************************************************** * Tuning parameters ****************************************************************/ /* MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #define FSE_MAX_MEMORY_USAGE 14 #define FSE_DEFAULT_MEMORY_USAGE 13 /* FSE_MAX_SYMBOL_VALUE : * Maximum symbol value authorized. * Required for proper stack allocation */ #define FSE_MAX_SYMBOL_VALUE 255 /**************************************************************** * template functions type & suffix ****************************************************************/ #define FSE_FUNCTION_TYPE BYTE #define FSE_FUNCTION_EXTENSION /**************************************************************** * Byte symbol type ****************************************************************/ #endif /* !FSE_COMMONDEFS_ONLY */ /**************************************************************** * Compiler specifics ****************************************************************/ #ifdef _MSC_VER /* Visual Studio */ # define FORCE_INLINE static __forceinline # include /* For Visual 2005 */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ #else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # ifdef __GNUC__ # define FORCE_INLINE static inline __attribute__((always_inline)) # else # define FORCE_INLINE static inline # endif # else # define FORCE_INLINE static # endif /* __STDC_VERSION__ */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Constants *****************************************************************/ #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) #define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" #endif /**************************************************************** * Error Management ****************************************************************/ #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /**************************************************************** * Complex types ****************************************************************/ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; /**************************************************************** * Templates ****************************************************************/ /* designed to be included for type-specific functions (template emulation in C) Objective is to write these functions only once, for improved maintenance */ /* safety checks */ #ifndef FSE_FUNCTION_EXTENSION # error "FSE_FUNCTION_EXTENSION must be defined" 
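/*
 * Illustrative arithmetic -- NOT part of the vendored sources. Working out the
 * tuning comment above: FSE_MAX_MEMORY_USAGE = 14 means a 2^14 = 16384-byte budget;
 * each FSE_decode_t cell is 4 bytes, so FSE_MAX_TABLELOG = 14-2 = 12, i.e. 4096
 * cells, sized to stay resident in a 16 KB L1 data cache.
 */
static void demo_fseFootprint(void)
{
    size_t const cells = (size_t)1 << (FSE_MAX_MEMORY_USAGE - 2);   /* 4096 */
    size_t const bytes = cells * 4;                                 /* 16384 */
    (void)cells; (void)bytes;
}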
#endif #ifndef FSE_FUNCTION_TYPE # error "FSE_FUNCTION_TYPE must be defined" #endif /* Function names */ #define FSE_CAT(X,Y) X##Y #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) /* Function templates */ #define FSE_DECODE_TYPE FSE_decode_t static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } static size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { void* ptr = dt+1; FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; FSE_DTableHeader DTableH; const U32 tableSize = 1 << tableLog; const U32 tableMask = tableSize-1; const U32 step = FSE_tableStep(tableSize); U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; U32 position = 0; U32 highThreshold = tableSize-1; const S16 largeLimit= (S16)(1 << (tableLog-1)); U32 noLarge = 1; U32 s; /* Sanity Checks */ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ DTableH.tableLog = (U16)tableLog; for (s=0; s<=maxSymbolValue; s++) { if (normalizedCounter[s]==-1) { tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; symbolNext[s] = 1; } else { if (normalizedCounter[s] >= largeLimit) noLarge=0; symbolNext[s] = normalizedCounter[s]; } } /* Spread symbols */ for (s=0; s<=maxSymbolValue; s++) { int i; for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ } } if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ /* Build Decoding table */ { U32 i; for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); bitStream >>= 4; bitCount = 4; *tableLogPtr = nbBits; remaining = (1<1) && (charnum<=*maxSVPtr)) { if (previous0) { unsigned n0 = charnum; while ((bitStream & 0xFFFF) == 0xFFFF) { n0+=24; if (ip < iend-5) { ip+=2; bitStream = MEM_readLE32(ip) >> bitCount; } else { bitStream >>= 16; bitCount+=16; } } while ((bitStream & 3) == 3) { n0+=3; bitStream>>=2; bitCount+=2; } n0 += bitStream & 3; bitCount += 2; if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; } else bitStream >>= 2; } { const short max = (short)((2*threshold-1)-remaining); short count; if ((bitStream & (threshold-1)) < (U32)max) { count = (short)(bitStream & (threshold-1)); bitCount += nbBits-1; } else { count = (short)(bitStream & (2*threshold-1)); if (count >= threshold) count -= max; bitCount += nbBits; } count--; /* extra accuracy */ remaining -= FSE_abs(count); normalizedCounter[charnum++] = count; previous0 = !count; while (remaining < threshold) { nbBits--; threshold >>= 1; } { if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { ip += bitCount>>3; bitCount &= 7; } else { bitCount -= (int)(8 * (iend - 4 - ip)); ip = iend - 4; } bitStream = MEM_readLE32(ip) >> (bitCount & 31); } } } if (remaining != 1) return ERROR(GENERIC); *maxSVPtr = charnum-1; ip += (bitCount+7)>>3; if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); return ip-istart; } /********************************************************* * Decompression (Byte symbols) *********************************************************/ static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) { void* ptr = dt; FSE_DTableHeader* const DTableH = 
(FSE_DTableHeader*)ptr; FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ DTableH->tableLog = 0; DTableH->fastMode = 0; cell->newState = 0; cell->symbol = symbolValue; cell->nbBits = 0; return 0; } static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) { void* ptr = dt; FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ const unsigned tableSize = 1 << nbBits; const unsigned tableMask = tableSize - 1; const unsigned maxSymbolValue = tableMask; unsigned s; /* Sanity checks */ if (nbBits < 1) return ERROR(GENERIC); /* min size */ /* Build Decoding Table */ DTableH->tableLog = (U16)nbBits; DTableH->fastMode = 1; for (s=0; s<=maxSymbolValue; s++) { dinfo[s].newState = 0; dinfo[s].symbol = (BYTE)s; dinfo[s].nbBits = (BYTE)nbBits; } return 0; } FORCE_INLINE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const omax = op + maxDstSize; BYTE* const olimit = omax-3; BIT_DStream_t bitD; FSE_DState_t state1; FSE_DState_t state2; size_t errorCode; /* Init */ errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ if (FSE_isError(errorCode)) return errorCode; FSE_initDState(&state1, &bitD, dt); FSE_initDState(&state2, &bitD, dt); #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) /* 4 symbols per loop */ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[1] = FSE_GETSYMBOL(&state2); if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } op[2] = FSE_GETSYMBOL(&state1); if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ BIT_reloadDStream(&bitD); op[3] = FSE_GETSYMBOL(&state2); } /* tail */ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ while (1) { if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) break; *op++ = FSE_GETSYMBOL(&state1); if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) break; *op++ = FSE_GETSYMBOL(&state2); } /* end ? 
*/ if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) return op-ostart; if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ return ERROR(corruption_detected); } static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt) { FSE_DTableHeader DTableH; memcpy(&DTableH, dt, sizeof(DTableH)); /* select fast mode (static) */ if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); } static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) { const BYTE* const istart = (const BYTE*)cSrc; const BYTE* ip = istart; short counting[FSE_MAX_SYMBOL_VALUE+1]; DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; size_t errorCode; if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ /* normal FSE decoding mode */ errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); if (FSE_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ ip += errorCode; cSrcSize -= errorCode; errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); if (FSE_isError(errorCode)) return errorCode; /* always return, even if it is an error code */ return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); } #endif /* FSE_COMMONDEFS_ONLY */ /* ****************************************************************** Huff0 : Huffman coder, part of New Generation Entropy library Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
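/*
 * Illustrative driver -- NOT part of the vendored sources. FSE_decompress() above
 * chains the three phases just defined: FSE_readNCount() parses the normalized
 * counters, FSE_buildDTable() spreads them into a decode table, and
 * FSE_decompress_usingDTable() runs the interleaved two-state loop.
 */
static size_t demo_fseBlock(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
    size_t const r = FSE_decompress(dst, dstCapacity, cSrc, cSrcSize);
    if (FSE_isError(r)) { /* corruption, oversized table, or dst too small */ }
    return r;   /* regenerated size, or an error code (test with FSE_isError()) */
}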
You can contact the author at : - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /**************************************************************** * Compiler specifics ****************************************************************/ #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) /* inline is defined */ #elif defined(_MSC_VER) # define inline __inline #else # define inline /* disable inline */ #endif #ifdef _MSC_VER /* Visual Studio */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ #endif /**************************************************************** * Includes ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ #include /* printf (debug) */ /**************************************************************** * Error Management ****************************************************************/ #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /****************************************** * Helper functions ******************************************/ static unsigned HUF_isError(size_t code) { return ERR_isError(code); } #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ #define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ #define HUF_MAX_SYMBOL_VALUE 255 #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) # error "HUF_MAX_TABLELOG is too large !" #endif /********************************************************* * Huff0 : Huffman block decompression *********************************************************/ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; /*! HUF_readStats Read compact Huffman tree, saved by HUF_writeCTable @huffWeight : destination buffer @return : size read from `src` */ static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize) { U32 weightTotal; U32 tableLog; const BYTE* ip = (const BYTE*) src; size_t iSize; size_t oSize; U32 n; if (!srcSize) return ERROR(srcSize_wrong); iSize = ip[0]; //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... 
*/ if (iSize >= 128) /* special header */ { if (iSize >= (242)) /* RLE */ { static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; oSize = l[iSize-242]; memset(huffWeight, 1, hwSize); iSize = 0; } else /* Incompressible */ { oSize = iSize - 127; iSize = ((oSize+1)/2); if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; for (n=0; n> 4; huffWeight[n+1] = ip[n/2] & 15; } } } else /* header compressed with FSE (normal case) */ { if (iSize+1 > srcSize) return ERROR(srcSize_wrong); oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ if (FSE_isError(oSize)) return oSize; } /* collect weight stats */ memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); weightTotal = 0; for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); { U32 total = 1 << tableLog; U32 rest = total - weightTotal; U32 verif = 1 << BIT_highbit32(rest); U32 lastWeight = BIT_highbit32(rest) + 1; if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ huffWeight[oSize] = (BYTE)lastWeight; rankStats[lastWeight]++; } /* check tree construction validity */ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ /* results */ *nbSymbolsPtr = (U32)(oSize+1); *tableLogPtr = tableLog; return iSize+1; } /**************************/ /* single-symbol decoding */ /**************************/ static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) { BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; U32 nbSymbols = 0; U32 n; U32 nextRankStart; void* ptr = DTable+1; HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
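/*
 * Illustrative arithmetic -- NOT part of the vendored sources. It isolates the
 * implied-last-weight rule used by HUF_readStats() above: each stored weight w
 * contributes (1<<w)>>1 to weightTotal, the total must complete a power of two,
 * and the remainder pins down the one weight that is never stored.
 */
static unsigned demo_impliedLastWeight(const BYTE* weights, size_t nbWeights)
{
    U32 weightTotal = 0, tableLog, rest;
    size_t n;
    for (n = 0; n < nbWeights; n++) weightTotal += (1u << weights[n]) >> 1;
    tableLog = BIT_highbit32(weightTotal) + 1;   /* smallest 2^k strictly above weightTotal */
    rest = (1u << tableLog) - weightTotal;
    /* rest must itself be a clean power of 2, otherwise the header is corrupt */
    return BIT_highbit32(rest) + 1;              /* weight of the implied last symbol */
}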
*/ iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ /* Prepare ranks */ nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 current = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = current; } /* fill DTable */ for (n=0; n> 1; U32 i; HUF_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (i = rankVal[w]; i < rankVal[w] + length; i++) dt[i] = D; rankVal[w] += length; } return iSize; } static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ const BYTE c = dt[val].byte; BIT_skipBits(Dstream, dt[val].nbBits); return c; } #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_1(p, bitDPtr); HUF_DECODE_SYMBOLX2_2(p, bitDPtr); HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* closer to the end */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, hence no need to reload */ while (p < pEnd) HUF_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } static size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const U16* DTable) { if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* ptr = DTable; const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; const U32 dtLog = DTable[0]; size_t errorCode; /* Init */ BIT_DStream_t bitD1; BIT_DStream_t bitD2; BIT_DStream_t bitD3; BIT_DStream_t bitD4; const size_t length1 = MEM_readLE16(istart); const size_t length2 = MEM_readLE16(istart+2); const size_t length3 = MEM_readLE16(istart+4); size_t length4; const BYTE* const istart1 = istart + 6; /* jumpTable */ const BYTE* const istart2 = istart1 + length1; const BYTE* const istart3 = istart2 + length2; const BYTE* const istart4 = istart3 + length3; const size_t segmentSize = (dstSize+3) / 4; BYTE* const opStart2 = ostart + segmentSize; BYTE* const opStart3 = opStart2 + segmentSize; BYTE* const opStart4 = opStart3 + segmentSize; BYTE* op1 = ostart; BYTE* op2 = opStart2; BYTE* op3 = opStart3; BYTE* op4 = opStart4; U32 endSignal; length4 = cSrcSize - (length1 + length2 + length3 + 6); if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ errorCode = BIT_initDStream(&bitD1, istart1, length1); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD2, istart2, 
length2); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD3, istart3, length3); if (HUF_isError(errorCode)) return errorCode; errorCode = BIT_initDStream(&bitD4, istart4, length4); if (HUF_isError(errorCode)) return errorCode; /* 16-32 symbols per loop (4-8 symbols per stream) */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_1(op1, &bitD1); HUF_DECODE_SYMBOLX2_1(op2, &bitD2); HUF_DECODE_SYMBOLX2_1(op3, &bitD3); HUF_DECODE_SYMBOLX2_1(op4, &bitD4); HUF_DECODE_SYMBOLX2_2(op1, &bitD1); HUF_DECODE_SYMBOLX2_2(op2, &bitD2); HUF_DECODE_SYMBOLX2_2(op3, &bitD3); HUF_DECODE_SYMBOLX2_2(op4, &bitD4); HUF_DECODE_SYMBOLX2_0(op1, &bitD1); HUF_DECODE_SYMBOLX2_0(op2, &bitD2); HUF_DECODE_SYMBOLX2_0(op3, &bitD3); HUF_DECODE_SYMBOLX2_0(op4, &bitD4); endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); } /* check corruption */ if (op1 > opStart2) return ERROR(corruption_detected); if (op2 > opStart3) return ERROR(corruption_detected); if (op3 > opStart4) return ERROR(corruption_detected); /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); /* check */ endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); if (!endSignal) return ERROR(corruption_detected); /* decoded size */ return dstSize; } } static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); const BYTE* ip = (const BYTE*) cSrc; size_t errorCode; errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); if (HUF_isError(errorCode)) return errorCode; if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); ip += errorCode; cSrcSize -= errorCode; return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); } /***************************/ /* double-symbols decoding */ /***************************/ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { HUF_DEltX4 DElt; U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; U32 s; /* get pre-calculated rankVal */ memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill skipped values */ if (minWeight>1) { U32 i, skipSize = rankVal[minWeight]; MEM_writeLE16(&(DElt.sequence), baseSeq); DElt.nbBits = (BYTE)(consumed); DElt.length = 1; for (i = 0; i < skipSize; i++) DTable[i] = DElt; } /* fill DTable */ for (s=0; s= 1 */ rankVal[weight] += length; } } typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; const int scaleLog = nbBitsBaseline - 
targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; U32 s; memcpy(rankVal, rankValOrigin, sizeof(rankVal)); /* fill DTable */ for (s=0; s= minBits) /* enough room for a second symbol */ { U32 sortedRank; int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { U32 i; const U32 end = start + length; HUF_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; for (i = start; i < end; i++) DTable[i] = DElt; } rankVal[weight] += length; } } static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) { BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; U32* const rankStart = rankStart0+1; rankVal_t rankVal; U32 tableLog, maxW, sizeOfSort, nbSymbols; const U32 memLog = DTable[0]; const BYTE* ip = (const BYTE*) src; size_t iSize = ip[0]; void* ptr = DTable; HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ /* find maxWeight */ for (maxW = tableLog; rankStats[maxW]==0; maxW--) {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ /* Get start index of each weight */ { U32 w, nextRankStart = 0; for (w=1; w<=maxW; w++) { U32 current = nextRankStart; nextRankStart += rankStats[w]; rankStart[w] = current; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ sizeOfSort = nextRankStart; } /* sort symbols by weight */ { U32 s; for (s=0; s> consumed; } } } HUF_fillDTableX4(dt, memLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); return iSize; } static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) { const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ memcpy(op, dt+val, 1); if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 8 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
    {
        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);

    while (p <= pEnd-2)
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */

    if (p < pEnd)
        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);

    return p-pStart;
}
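/* Illustrative sketch (not part of the upstream zstd sources) : the 4-stream
 * block parsed by HUF_decompress4X4_usingDTable below opens with a 6-byte jump
 * table of three little-endian 16-bit lengths for streams 1-3; stream 4 runs
 * to the end of the compressed block, so its length is derived. The names
 * toy_readLE16 / toy_splitStreams are hypothetical; only the layout they parse
 * comes from the code below. */
#include <stddef.h>
#include <stdio.h>

static size_t toy_readLE16(const unsigned char* p)
{
    return (size_t)p[0] | ((size_t)p[1] << 8);
}

/* Split a compressed block into its 4 sub-stream lengths; returns 0 on corruption. */
static int toy_splitStreams(const unsigned char* src, size_t srcSize, size_t len[4])
{
    if (srcSize < 10) return 0;   /* strict minimum : jump table + 1 byte per stream */
    len[0] = toy_readLE16(src);
    len[1] = toy_readLE16(src + 2);
    len[2] = toy_readLE16(src + 4);
    if (len[0] + len[1] + len[2] + 6 > srcSize) return 0;   /* overflow => corruption */
    len[3] = srcSize - (len[0] + len[1] + len[2] + 6);
    return 1;
}

int main(void)
{
    unsigned char blk[14] = { 2,0, 2,0, 2,0,  1,2, 3,4, 5,6, 7,8 };   /* header + 4 streams */
    size_t len[4];
    if (toy_splitStreams(blk, sizeof(blk), len))
        printf("stream lengths : %zu %zu %zu %zu\n", len[0], len[1], len[2], len[3]);
    return 0;
}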
static size_t HUF_decompress4X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const U32* DTable)
{
    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */

    {
        const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;

        const void* ptr = DTable;
        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
        const U32 dtLog = DTable[0];
        size_t errorCode;

        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        const size_t length1 = MEM_readLE16(istart);
        const size_t length2 = MEM_readLE16(istart+2);
        const size_t length3 = MEM_readLE16(istart+4);
        size_t length4;
        const BYTE* const istart1 = istart + 6;   /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;

        length4 = cSrcSize - (length1 + length2 + length3 + 6);
        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        errorCode = BIT_initDStream(&bitD1, istart1, length1);
        if (HUF_isError(errorCode)) return errorCode;
        errorCode = BIT_initDStream(&bitD2, istart2, length2);
        if (HUF_isError(errorCode)) return errorCode;
        errorCode = BIT_initDStream(&bitD3, istart3, length3);
        if (HUF_isError(errorCode)) return errorCode;
        errorCode = BIT_initDStream(&bitD4, istart4, length4);
        if (HUF_isError(errorCode)) return errorCode;

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
        {
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);

            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
        if (!endSignal) return ERROR(corruption_detected);

        /* decoded size */
        return dstSize;
    }
}


static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize;
    cSrcSize -= hSize;

    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}


/**********************************/
/* quad-symbol decoding           */
/**********************************/
typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;

/* recursive, up to level 3; may benefit from