Index: head/sys/boot/efi/boot1/Makefile
===================================================================
--- head/sys/boot/efi/boot1/Makefile	(revision 304320)
+++ head/sys/boot/efi/boot1/Makefile	(revision 304321)
@@ -1,136 +1,141 @@
 # $FreeBSD$
 
 MAN=
 
 .include <src.opts.mk>
 
 MK_SSP=		no
 
 PROG=		boot1.sym
 INTERNALPROG=
 WARNS?=		6
 
 .if ${MK_ZFS} != "no"
 # Disable warnings that are currently incompatible with the zfs boot code
 CWARNFLAGS.zfs_module.c += -Wno-array-bounds
 CWARNFLAGS.zfs_module.c += -Wno-cast-align
 CWARNFLAGS.zfs_module.c += -Wno-cast-qual
 CWARNFLAGS.zfs_module.c += -Wno-missing-prototypes
 CWARNFLAGS.zfs_module.c += -Wno-sign-compare
 CWARNFLAGS.zfs_module.c += -Wno-unused-parameter
 CWARNFLAGS.zfs_module.c += -Wno-unused-function
+CWARNFLAGS.skein.c += -Wno-cast-align
+CWARNFLAGS.skein.c += -Wno-missing-variable-declarations
 .endif
 
 # architecture-specific loader code
 SRCS=	boot1.c self_reloc.c start.S ufs_module.c
 .if ${MK_ZFS} != "no"
 SRCS+=		zfs_module.c
+SRCS+=		skein.c skein_block.c
+.PATH:		${.CURDIR}/../../../crypto/skein
 .endif
 
 CFLAGS+=	-I.
 CFLAGS+=	-I${.CURDIR}/../include
 CFLAGS+=	-I${.CURDIR}/../include/${MACHINE}
 CFLAGS+=	-I${.CURDIR}/../../../contrib/dev/acpica/include
 CFLAGS+=	-I${.CURDIR}/../../..
 CFLAGS+=	-DEFI_UFS_BOOT
 .ifdef(EFI_DEBUG)
 CFLAGS+=	-DEFI_DEBUG
 .endif
 
 .if ${MK_ZFS} != "no"
 CFLAGS+=	-I${.CURDIR}/../../zfs/
 CFLAGS+=	-I${.CURDIR}/../../../cddl/boot/zfs/
+CFLAGS+=	-I${.CURDIR}/../../../crypto/skein
 CFLAGS+=	-DEFI_ZFS_BOOT
 .endif
 
 # Always add MI sources and REGULAR efi loader bits
 .PATH:		${.CURDIR}/../loader/arch/${MACHINE}
 .PATH:		${.CURDIR}/../loader
 .PATH:		${.CURDIR}/../../common
 CFLAGS+=	-I${.CURDIR}/../../common
 
 FILES=	boot1.efi boot1.efifat
 FILESMODE_boot1.efi=	${BINMODE}
 
 LDSCRIPT=	${.CURDIR}/../loader/arch/${MACHINE}/ldscript.${MACHINE}
 LDFLAGS+=	-Wl,-T${LDSCRIPT} -Wl,-Bsymbolic -shared
 
 .if ${MACHINE_CPUARCH} == "aarch64"
 CFLAGS+=	-msoft-float -mgeneral-regs-only
 .endif
 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
 CFLAGS+=	-fPIC
 LDFLAGS+=	-Wl,-znocombreloc
 .endif
 
 #
 # Add libstand for the runtime functions used by the compiler - for example
 # __aeabi_* (arm) or __divdi3 (i386).
 # as well as required string and memory functions for all platforms.
 #
 DPADD+=		${LIBSTAND}
 LDADD+=		-lstand
 
 DPADD+=		${LDSCRIPT}
 
 NM?=		nm
 OBJCOPY?=	objcopy
 
 .if ${MACHINE_CPUARCH} == "amd64"
 EFI_TARGET=	efi-app-x86_64
 .elif ${MACHINE_CPUARCH} == "i386"
 EFI_TARGET=	efi-app-ia32
 .else
 EFI_TARGET=	binary
 .endif
 
 boot1.efi: ${PROG}
 	if ${NM} ${.ALLSRC} | grep ' U '; then \
 		echo "Undefined symbols in ${.ALLSRC}"; \
 		exit 1; \
 	fi
 	${OBJCOPY} -j .peheader -j .text -j .sdata -j .data \
 		-j .dynamic -j .dynsym -j .rel.dyn \
 		-j .rela.dyn -j .reloc -j .eh_frame \
 		--output-target=${EFI_TARGET} ${.ALLSRC} ${.TARGET}
 
 boot1.o: ${.CURDIR}/../../common/ufsread.c
 
 # The following inserts our objects into a template FAT file system
 # created by generate-fat.sh
 
 .include "${.CURDIR}/Makefile.fat"
 BOOT1_MAXSIZE?=	131072
 
 boot1.efifat: boot1.efi
 	@set -- `ls -l boot1.efi`; \
 	x=$$(($$5-${BOOT1_MAXSIZE})); \
 	if [ $$x -ge 0 ]; then \
 	    echo "boot1 $$x bytes too large; regenerate FAT templates?" >&2 ;\
 	    exit 1; \
 	fi
 	echo ${.OBJDIR}
 	uudecode ${.CURDIR}/fat-${MACHINE}.tmpl.bz2.uu
 	mv fat-${MACHINE}.tmpl.bz2 ${.TARGET}.bz2
 	bzip2 -f -d ${.TARGET}.bz2
 	dd if=boot1.efi of=${.TARGET} seek=${BOOT1_OFFSET} conv=notrunc \
 	    status=none
 
 CLEANFILES= boot1.efi boot1.efifat
 
 .include <bsd.prog.mk>
 
 beforedepend ${OBJS}: machine
 
 CLEANFILES+=   machine
 
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../../${MACHINE}/include machine
 
 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
 beforedepend ${OBJS}: x86
 CLEANFILES+=   x86
 
 x86: .NOMETA
 	ln -sf ${.CURDIR}/../../../x86/include x86
 .endif
Index: head/sys/boot/efi/loader/Makefile
===================================================================
--- head/sys/boot/efi/loader/Makefile	(revision 304320)
+++ head/sys/boot/efi/loader/Makefile	(revision 304321)
@@ -1,162 +1,165 @@
 # $FreeBSD$
 
 MAN=
 
 .include <src.opts.mk>
 
 MK_SSP=		no
 
 PROG=		loader.sym
 INTERNALPROG=
 WARNS?=		3
 
 # architecture-specific loader code
 SRCS=	autoload.c \
 	bootinfo.c \
 	conf.c \
 	copy.c \
 	devicename.c \
 	main.c \
 	self_reloc.c \
 	smbios.c \
 	vers.c
 
 .if ${MK_ZFS} != "no"
 SRCS+=		zfs.c
 .PATH:		${.CURDIR}/../../zfs
+SRCS+=		skein.c skein_block.c
+.PATH:		${.CURDIR}/../../../crypto/skein
 
 # Disable warnings that are currently incompatible with the zfs boot code
 CWARNFLAGS.zfs.c+=	-Wno-sign-compare
 CWARNFLAGS.zfs.c+=	-Wno-array-bounds
 CWARNFLAGS.zfs.c+=	-Wno-missing-prototypes
 .endif
 
 # We implement a slightly non-standard %S in that it always takes a
 # CHAR16 that's common in UEFI-land instead of a wchar_t. This only
 # seems to matter on arm64 where wchar_t defaults to an int instead
 # of a short. There's no good cast to use here so just ignore the
 # warnings for now.
 CWARNFLAGS.main.c+=	-Wno-format
 
 .PATH: ${.CURDIR}/arch/${MACHINE}
 # For smbios.c
 .PATH: ${.CURDIR}/../../i386/libi386
 .include "${.CURDIR}/arch/${MACHINE}/Makefile.inc"
 
 CFLAGS+=	-I${.CURDIR}
 CFLAGS+=	-I${.CURDIR}/arch/${MACHINE}
 CFLAGS+=	-I${.CURDIR}/../include
 CFLAGS+=	-I${.CURDIR}/../include/${MACHINE}
 CFLAGS+=	-I${.CURDIR}/../../../contrib/dev/acpica/include
 CFLAGS+=	-I${.CURDIR}/../../..
 CFLAGS+=	-I${.CURDIR}/../../i386/libi386
 .if ${MK_ZFS} != "no"
 CFLAGS+=	-I${.CURDIR}/../../zfs
 CFLAGS+=	-I${.CURDIR}/../../../cddl/boot/zfs
+CFLAGS+=	-I${.CURDIR}/../../../crypto/skein
 CFLAGS+=	-DEFI_ZFS_BOOT
 .endif
 CFLAGS+=	-DNO_PCI -DEFI
 
 # make buildenv doesn't set DESTDIR, this means LIBSTAND
 # will be wrong when crossbuilding.
 .if exists(${.OBJDIR}/../../../../lib/libstand/libstand.a)
 LIBSTAND=	${.OBJDIR}/../../../../lib/libstand/libstand.a
 .endif
 
 .if !defined(BOOT_HIDE_SERIAL_NUMBERS)
 # Export serial numbers, UUID, and asset tag from loader.
 CFLAGS+= -DSMBIOS_SERIAL_NUMBERS
 .if defined(BOOT_LITTLE_ENDIAN_UUID)
 # Use little-endian UUID format as defined in SMBIOS 2.6.
 CFLAGS+= -DSMBIOS_LITTLE_ENDIAN_UUID
 .elif defined(BOOT_NETWORK_ENDIAN_UUID)
 # Use network-endian UUID format for backward compatibility.
 CFLAGS+= -DSMBIOS_NETWORK_ENDIAN_UUID
 .endif
 .endif
 
 .if ${MK_FORTH} != "no"
 BOOT_FORTH=	yes
 CFLAGS+=	-DBOOT_FORTH
 CFLAGS+=	-I${.CURDIR}/../../ficl
 CFLAGS+=	-I${.CURDIR}/../../ficl/${MACHINE_CPUARCH}
 LIBFICL=	${.OBJDIR}/../../ficl/libficl.a
 .endif
 
 LOADER_FDT_SUPPORT?=	no
 .if ${MK_FDT} != "no" && ${LOADER_FDT_SUPPORT} != "no"
 CFLAGS+=	-I${.CURDIR}/../../fdt
 CFLAGS+=	-I${.OBJDIR}/../../fdt
 CFLAGS+=	-DLOADER_FDT_SUPPORT
 LIBEFI_FDT=	${.OBJDIR}/../../efi/fdt/libefi_fdt.a
 LIBFDT=		${.OBJDIR}/../../fdt/libfdt.a
 .endif
 
 # Include bcache code.
 HAVE_BCACHE=    yes
 
 .if defined(EFI_STAGING_SIZE)
 CFLAGS+=	-DEFI_STAGING_SIZE=${EFI_STAGING_SIZE}
 .endif
 
 # Always add MI sources
 .PATH:		${.CURDIR}/../../common
 .include	"${.CURDIR}/../../common/Makefile.inc"
 CFLAGS+=	-I${.CURDIR}/../../common
 
 FILES+=	loader.efi
 FILESMODE_loader.efi=	${BINMODE}
 
 LDSCRIPT=	${.CURDIR}/arch/${MACHINE}/ldscript.${MACHINE}
 LDFLAGS+=	-Wl,-T${LDSCRIPT} -Wl,-Bsymbolic -shared
 
 CLEANFILES+=	vers.c loader.efi
 
 NEWVERSWHAT=	"EFI loader" ${MACHINE}
 
 vers.c:	${.CURDIR}/../../common/newvers.sh ${.CURDIR}/../../efi/loader/version
 	sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
 
 NM?=		nm
 OBJCOPY?=	objcopy
 
 .if ${MACHINE_CPUARCH} == "amd64"
 EFI_TARGET=	efi-app-x86_64
 .elif ${MACHINE_CPUARCH} == "i386"
 EFI_TARGET=	efi-app-ia32
 .else
 EFI_TARGET=	binary
 .endif
 
 loader.efi: ${PROG}
 	if ${NM} ${.ALLSRC} | grep ' U '; then \
 		echo "Undefined symbols in ${.ALLSRC}"; \
 		exit 1; \
 	fi
 	${OBJCOPY} -j .peheader -j .text -j .sdata -j .data \
 		-j .dynamic -j .dynsym -j .rel.dyn \
 		-j .rela.dyn -j .reloc -j .eh_frame -j set_Xcommand_set \
 		--output-target=${EFI_TARGET} ${.ALLSRC} ${.TARGET}
 
 LIBEFI=		${.OBJDIR}/../libefi/libefi.a
 
 DPADD=		${LIBFICL} ${LIBEFI} ${LIBFDT} ${LIBEFI_FDT} ${LIBSTAND} \
 		${LDSCRIPT}
 LDADD=		${LIBFICL} ${LIBEFI} ${LIBFDT} ${LIBEFI_FDT} ${LIBSTAND}
 
 .include <bsd.prog.mk>
 
 beforedepend ${OBJS}: machine
 
 CLEANFILES+=   machine
 
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../../${MACHINE}/include machine
 
 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
 beforedepend ${OBJS}: x86
 CLEANFILES+=   x86
 
 x86: .NOMETA
 	ln -sf ${.CURDIR}/../../../x86/include x86
 .endif
Index: head/sys/boot/i386/boot2/Makefile
===================================================================
--- head/sys/boot/i386/boot2/Makefile	(revision 304320)
+++ head/sys/boot/i386/boot2/Makefile	(revision 304321)
@@ -1,117 +1,117 @@
 # $FreeBSD$
 
 .include <bsd.own.mk>
 
 FILES=		boot boot1 boot2
 
 NM?=		nm
 
 # A value of 0x80 enables LBA support.
 BOOT_BOOT1_FLAGS?=	0x80
 
 BOOT_COMCONSOLE_PORT?= 0x3f8
 BOOT_COMCONSOLE_SPEED?= 9600
 B2SIOFMT?=	0x3
 
 REL1=	0x700
 ORG1=	0x7c00
 ORG2=	0x2000
 
 # Decide level of UFS support.
 BOOT2_UFS?=	UFS1_AND_UFS2
 #BOOT2_UFS?=	UFS2_ONLY
 #BOOT2_UFS?=	UFS1_ONLY
 
 CFLAGS=	-fomit-frame-pointer \
 	-mrtd \
 	-mregparm=3 \
 	-DUSE_XREAD \
 	-D${BOOT2_UFS} \
 	-DFLAGS=${BOOT_BOOT1_FLAGS} \
 	-DSIOPRT=${BOOT_COMCONSOLE_PORT} \
 	-DSIOFMT=${B2SIOFMT} \
 	-DSIOSPD=${BOOT_COMCONSOLE_SPEED} \
 	-I${.CURDIR}/../../common \
 	-I${.CURDIR}/../btx/lib -I. \
-	-Wall -Waggregate-return -Wbad-function-cast -Wcast-align \
+	-Wall -Waggregate-return -Wbad-function-cast -Wno-cast-align \
 	-Wmissing-declarations -Wmissing-prototypes -Wnested-externs \
 	-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
 	-Winline
 
 CFLAGS.gcc+=	-Os \
 		-fno-guess-branch-probability \
 		-fno-unit-at-a-time \
 		--param max-inline-insns-single=100
 .if ${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} <= 40201
 CFLAGS.gcc+=   -mno-align-long-strings
 .endif
 
 CFLAGS.clang+=	-Oz ${CLANG_OPT_SMALL}
 
 LD_FLAGS=-static -N --gc-sections
 
 # Pick up ../Makefile.inc early.
 .include <bsd.init.mk>
 
 CLEANFILES=	boot
 
 boot: boot1 boot2
 	cat boot1 boot2 > boot
 
 CLEANFILES+=	boot1 boot1.out boot1.o
 
 boot1: boot1.out
 	${OBJCOPY} -S -O binary boot1.out ${.TARGET}
 
 boot1.out: boot1.o
 	${LD} ${LD_FLAGS} -e start -Ttext ${ORG1} -o ${.TARGET} boot1.o
 
 CLEANFILES+=	boot2 boot2.ld boot2.ldr boot2.bin boot2.out boot2.o \
 		boot2.s boot2.s.tmp boot2.h sio.o
 
 boot2: boot2.ld
 	@set -- `ls -l boot2.ld`; x=$$((7680-$$5)); \
 	    echo "$$x bytes available"; test $$x -ge 0
 	dd if=boot2.ld of=${.TARGET} obs=7680 conv=osync status=none
 
 boot2.ld: boot2.ldr boot2.bin ${BTXKERN}
 	btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l boot2.ldr \
 	    -o ${.TARGET} -P 1 boot2.bin
 
 boot2.ldr:
 	dd if=/dev/zero of=${.TARGET} bs=512 count=1 status=none
 
 boot2.bin: boot2.out
 	${OBJCOPY} -S -O binary boot2.out ${.TARGET}
 
 boot2.out: ${BTXCRT} boot2.o sio.o
 	${LD} ${LD_FLAGS} -Ttext ${ORG2} -o ${.TARGET} ${.ALLSRC}
 
 boot2.o: boot2.s
 	${CC} ${ACFLAGS} -c boot2.s
 
 SRCS=	boot2.c boot2.h
 
 boot2.s: boot2.c boot2.h ${.CURDIR}/../../common/ufsread.c
 	${CC} ${CFLAGS} -S -o boot2.s.tmp ${.CURDIR}/boot2.c
 	sed -e '/align/d' -e '/nop/d' < boot2.s.tmp > boot2.s
 	rm -f boot2.s.tmp
 
 boot2.h: boot1.out
 	${NM} -t d ${.ALLSRC} | awk '/([0-9])+ T xread/ \
 	    { x = $$1 - ORG1; \
 	    printf("#define XREADORG %#x\n", REL1 + x) }' \
 	    ORG1=`printf "%d" ${ORG1}` \
 	    REL1=`printf "%d" ${REL1}` > ${.TARGET}
 
 .if ${MACHINE_CPUARCH} == "amd64"
 beforedepend boot2.s: machine
 CLEANFILES+=	machine
 machine: ${.CURDIR}/../../../i386/include .NOMETA
 	ln -sf ${.ALLSRC} ${.TARGET}
 .endif
 
 .include <bsd.prog.mk>
 
 # XXX: clang integrated-as doesn't grok .codeNN directives yet
 CFLAGS.boot1.S=		${CLANG_NO_IAS}
Index: head/sys/boot/i386/gptboot/Makefile
===================================================================
--- head/sys/boot/i386/gptboot/Makefile	(revision 304320)
+++ head/sys/boot/i386/gptboot/Makefile	(revision 304321)
@@ -1,92 +1,92 @@
 # $FreeBSD$
 
 .PATH:		${.CURDIR}/../boot2 ${.CURDIR}/../common ${.CURDIR}/../../common
 
 FILES=		gptboot
 MAN=		gptboot.8
 
 NM?=		nm
 
 BOOT_COMCONSOLE_PORT?= 0x3f8
 BOOT_COMCONSOLE_SPEED?= 9600
 B2SIOFMT?=	0x3
 
 REL1=	0x700
 ORG1=	0x7c00
 ORG2=	0x0
 
 # Decide level of UFS support.
 GPTBOOT_UFS?=	UFS1_AND_UFS2
 #GPTBOOT_UFS?=	UFS2_ONLY
 #GPTBOOT_UFS?=	UFS1_ONLY
 
 CFLAGS=	-DBOOTPROG=\"gptboot\" \
 	-O1 \
 	-DGPT \
 	-D${GPTBOOT_UFS} \
 	-DSIOPRT=${BOOT_COMCONSOLE_PORT} \
 	-DSIOFMT=${B2SIOFMT} \
 	-DSIOSPD=${BOOT_COMCONSOLE_SPEED} \
 	-I${.CURDIR}/../../common \
 	-I${.CURDIR}/../common \
 	-I${.CURDIR}/../btx/lib -I. \
 	-I${.CURDIR}/../boot2 \
 	-I${.CURDIR}/../../.. \
-	-Wall -Waggregate-return -Wbad-function-cast -Wcast-align \
+	-Wall -Waggregate-return -Wbad-function-cast -Wno-cast-align \
 	-Wmissing-declarations -Wmissing-prototypes -Wnested-externs \
 	-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
-	-Winline
+	-Winline -Wno-pointer-sign
 
 CFLAGS.gcc+=	--param max-inline-insns-single=100
 
 .if !defined(LOADER_NO_GELI_SUPPORT)
 CFLAGS+=	-DLOADER_GELI_SUPPORT
 CFLAGS+=	-I${.CURDIR}/../../geli
 LIBGELIBOOT=	${.OBJDIR}/../../geli/libgeliboot.a
 .PATH:		${.CURDIR}/../../../opencrypto
 OPENCRYPTO_XTS=	xform_aes_xts.o
 .endif
 
 LD_FLAGS=-static -N --gc-sections
 
 LIBSTAND=	${.OBJDIR}/../../libstand32/libstand.a
 
 # Pick up ../Makefile.inc early.
 .include <bsd.init.mk>
 
 CLEANFILES=	gptboot
 
 gptboot: gptldr.bin gptboot.bin ${BTXKERN}
 	btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l gptldr.bin \
 	    -o ${.TARGET} gptboot.bin
 
 CLEANFILES+=	gptldr.bin gptldr.out gptldr.o
 
 gptldr.bin: gptldr.out
 	${OBJCOPY} -S -O binary gptldr.out ${.TARGET}
 
 gptldr.out: gptldr.o
 	${LD} ${LD_FLAGS} -e start -Ttext ${ORG1} -o ${.TARGET} gptldr.o
 
 CLEANFILES+=	gptboot.bin gptboot.out gptboot.o sio.o crc32.o drv.o \
 		cons.o util.o ${OPENCRYPTO_XTS}
 
 gptboot.bin: gptboot.out
 	${OBJCOPY} -S -O binary gptboot.out ${.TARGET}
 
 gptboot.out: ${BTXCRT} gptboot.o sio.o crc32.o drv.o cons.o util.o ${OPENCRYPTO_XTS}
 	${LD} ${LD_FLAGS} -Ttext ${ORG2} -o ${.TARGET} ${.ALLSRC} ${LIBSTAND} ${LIBGELIBOOT}
 
 gptboot.o: ${.CURDIR}/../../common/ufsread.c
 
 .if ${MACHINE_CPUARCH} == "amd64"
 beforedepend gptboot.o: machine
 CLEANFILES+=	machine
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../../i386/include machine
 .endif
 
 .include <bsd.prog.mk>
 
 # XXX: clang integrated-as doesn't grok .codeNN directives yet
 CFLAGS.gptldr.S=	${CLANG_NO_IAS}
Index: head/sys/boot/i386/gptboot/gptldr.S
===================================================================
--- head/sys/boot/i386/gptboot/gptldr.S	(revision 304320)
+++ head/sys/boot/i386/gptboot/gptldr.S	(revision 304321)
@@ -1,142 +1,142 @@
 /*-
  * Copyright (c) 2007 Yahoo!, Inc.
  * All rights reserved.
  * Written by: John Baldwin <jhb@FreeBSD.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. Neither the name of the author nor the names of any co-contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  *
  * Partly from: src/sys/boot/i386/boot2/boot1.S 1.31
  */
 
 /* Memory Locations */
 		.set MEM_REL,0x700		# Relocation address
 		.set MEM_ARG,0x900		# Arguments
 		.set MEM_ORG,0x7c00		# Origin
 		.set MEM_BUF,0x8cec		# Load area
 		.set MEM_BTX,0x9000		# BTX start
 		.set MEM_JMP,0x9010		# BTX entry point
 		.set MEM_USR,0xa000		# Client start
 		.set BDA_BOOT,0x472		# Boot howto flag
 	
 /* Misc. Constants */
 		.set SIZ_PAG,0x1000		# Page size
 		.set SIZ_SEC,0x200		# Sector size
-		.set COPY_BLKS,0x4		# Number of blocks
+		.set COPY_BLKS,0x8		# Number of blocks
 						# to copy for boot2
 		.set COPY_BLK_SZ,0x8000		# Copy in 32k blocks; must be
 						# a multiple of 16 bytes
 
 		.globl start
 		.code16
 
 /*
  * Copy BTX and boot2 to the right locations and start it all up.
  */
 
 /*
  * Setup the segment registers to flat addressing (segment 0) and setup the
  * stack to end just below the start of our code.
  */
 start:		xor %cx,%cx			# Zero
 		mov %cx,%es			# Address
 		mov %cx,%ds			#  data
 		mov %cx,%ss			# Set up
 		mov $start,%sp			#  stack
 
 /*
  * BTX is right after us at 'end'.  We read the length of BTX out of
  * its header to find boot2.  We need to copy boot2 to MEM_USR and BTX
  * to MEM_BTX.  Since those might overlap, we have to copy boot2
  * backwards first and then copy BTX.  We aren't sure exactly how long
  * boot2 is, but it's currently under 128kB so we'll copy 4 blocks of 32kB
  * each; this can be adjusted via COPY_BLK and COPY_BLK_SZ above.
  */
 		mov $end,%bx			# BTX
 		mov 0xa(%bx),%si		# Get BTX length and set
 		add %bx,%si			#  %si to start of boot2
 		dec %si				# Set %ds:%si to point at the
 		mov %si,%ax			# last byte we want to copy
 		shr $4,%ax			# from boot2, with %si made as
 		add $(COPY_BLKS*COPY_BLK_SZ/16),%ax	# small as possible.
 		and $0xf,%si			# 
 		mov %ax,%ds			#
 		mov $MEM_USR/16,%ax		# Set %es:(-1) to point at
 		add $(COPY_BLKS*COPY_BLK_SZ/16),%ax	# the last byte we
 		mov %ax,%es			# want to copy boot2 into.
 		mov $COPY_BLKS,%bx		# Copy COPY_BLKS 32k blocks
 copyloop:
 		add $COPY_BLK_SZ,%si		# Adjust %ds:%si to point at
 		mov %ds,%ax			# the end of the next 32k to
 		sub $COPY_BLK_SZ/16,%ax		# copy from boot2
 		mov %ax,%ds
 		mov $COPY_BLK_SZ-1,%di		# Adjust %es:%di to point at
 		mov %es,%ax			# the end of the next 32k into
 		sub $COPY_BLK_SZ/16,%ax		# which we want boot2 copied
 		mov %ax,%es
 		mov $COPY_BLK_SZ,%cx		# Copy 32k
 		std
 		rep movsb
 		dec %bx
 		jnz copyloop
 		mov %cx,%ds			# Reset %ds and %es
 		mov %cx,%es
 		mov $end,%bx			# BTX
 		mov 0xa(%bx),%cx		# Get BTX length and set
 		mov %bx,%si			#  %si to end of BTX
 		mov $MEM_BTX,%di		# %di -> end of BTX at
 		add %cx,%si			#  MEM_BTX
 		add %cx,%di
 		dec %si
 		dec %di
 		rep movsb			# Move BTX
 		cld				# String ops inc
 /*
  * Enable A20 so we can access memory above 1 meg.
  * Use the zero-valued %cx as a timeout for embedded hardware which do not
  * have a keyboard controller.
  */
 seta20: 	cli				# Disable interrupts
 seta20.1:	dec %cx				# Timeout?
 		jz seta20.3			# Yes
 		inb $0x64,%al			# Get status
 		testb $0x2,%al			# Busy?
 		jnz seta20.1			# Yes
 		movb $0xd1,%al			# Command: Write
 		outb %al,$0x64			#  output port
 seta20.2:	inb $0x64,%al			# Get status
 		testb $0x2,%al			# Busy?
 		jnz seta20.2			# Yes
 		movb $0xdf,%al			# Enable
 		outb %al,$0x60			#  A20
 seta20.3:	sti				# Enable interrupts
 
 /*
  * Save drive number from BIOS so boot2 can see it and start BTX.
  */
 		movb %dl,MEM_ARG
 		jmp MEM_JMP			# Start BTX
 end:
Index: head/sys/boot/i386/gptzfsboot/Makefile
===================================================================
--- head/sys/boot/i386/gptzfsboot/Makefile	(revision 304320)
+++ head/sys/boot/i386/gptzfsboot/Makefile	(revision 304321)
@@ -1,90 +1,92 @@
 # $FreeBSD$
 
 .PATH:		${.CURDIR}/../boot2 ${.CURDIR}/../gptboot \
 		${.CURDIR}/../zfsboot ${.CURDIR}/../common \
-		${.CURDIR}/../../common
+		${.CURDIR}/../../common ${.CURDIR}/../../../crypto/skein
 
 FILES=		gptzfsboot
 MAN=		gptzfsboot.8
 
 NM?=		nm
 
 BOOT_COMCONSOLE_PORT?= 0x3f8
 BOOT_COMCONSOLE_SPEED?= 9600
 B2SIOFMT?=	0x3
 
 REL1=	0x700
 ORG1=	0x7c00
 ORG2=	0x0
 
 CFLAGS=	-DBOOTPROG=\"gptzfsboot\" \
 	-O1 \
 	-DGPT -DBOOT2 \
 	-DSIOPRT=${BOOT_COMCONSOLE_PORT} \
 	-DSIOFMT=${B2SIOFMT} \
 	-DSIOSPD=${BOOT_COMCONSOLE_SPEED} \
 	-I${.CURDIR}/../../common \
 	-I${.CURDIR}/../common \
 	-I${.CURDIR}/../../zfs \
 	-I${.CURDIR}/../../../cddl/boot/zfs \
+	-I${.CURDIR}/../../../crypto/skein \
 	-I${.CURDIR}/../btx/lib -I. \
 	-I${.CURDIR}/../boot2 \
 	-I${.CURDIR}/../../.. \
-	-Wall -Waggregate-return -Wbad-function-cast -Wcast-align \
+	-Wall -Waggregate-return -Wbad-function-cast -Wno-cast-align \
 	-Wmissing-declarations -Wmissing-prototypes -Wnested-externs \
 	-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
-	-Winline
+	-Winline -Wno-tentative-definition-incomplete-type -Wno-pointer-sign
 
 .if !defined(LOADER_NO_GELI_SUPPORT)
 CFLAGS+=	-DLOADER_GELI_SUPPORT
 CFLAGS+=	-I${.CURDIR}/../../geli
 LIBGELIBOOT=	${.OBJDIR}/../../geli/libgeliboot.a
 .PATH:		${.CURDIR}/../../../opencrypto
 OPENCRYPTO_XTS=	xform_aes_xts.o
 .endif
 
 CFLAGS.gcc+=	--param max-inline-insns-single=100
 
 LD_FLAGS=-static -N --gc-sections
 
 LIBSTAND=	${.OBJDIR}/../../libstand32/libstand.a
 
 # Pick up ../Makefile.inc early.
 .include <bsd.init.mk>
 
 CLEANFILES=	gptzfsboot
 
 gptzfsboot: gptldr.bin gptzfsboot.bin ${BTXKERN}
 	btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l gptldr.bin \
 	    -o ${.TARGET} gptzfsboot.bin
 
 CLEANFILES+=	gptldr.bin gptldr.out gptldr.o
 
 gptldr.bin: gptldr.out
 	${OBJCOPY} -S -O binary gptldr.out ${.TARGET}
 
 gptldr.out: gptldr.o
 	${LD} ${LD_FLAGS} -e start -Ttext ${ORG1} -o ${.TARGET} gptldr.o
 
 CLEANFILES+=	gptzfsboot.bin gptzfsboot.out zfsboot.o sio.o cons.o \
-		drv.o gpt.o util.o ${OPENCRYPTO_XTS}
+		drv.o gpt.o util.o skein.o skein_block.o ${OPENCRYPTO_XTS}
 
 gptzfsboot.bin: gptzfsboot.out
 	${OBJCOPY} -S -O binary gptzfsboot.out ${.TARGET}
 
-gptzfsboot.out: ${BTXCRT} zfsboot.o sio.o gpt.o drv.o cons.o util.o ${OPENCRYPTO_XTS}
+gptzfsboot.out: ${BTXCRT} zfsboot.o sio.o gpt.o drv.o cons.o util.o \
+	skein.o skein_block.o ${OPENCRYPTO_XTS}
 	${LD} ${LD_FLAGS} -Ttext ${ORG2} -o ${.TARGET} ${.ALLSRC} ${LIBSTAND} ${LIBGELIBOOT}
 
 zfsboot.o: ${.CURDIR}/../../zfs/zfsimpl.c
 
 .if ${MACHINE_CPUARCH} == "amd64"
 beforedepend zfsboot.o: machine
 CLEANFILES+=	machine
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../../i386/include machine
 .endif
 
 .include <bsd.prog.mk>
 
 # XXX: clang integrated-as doesn't grok .codeNN directives yet
 CFLAGS.gptldr.S=	${CLANG_NO_IAS}
Index: head/sys/boot/i386/zfsboot/Makefile
===================================================================
--- head/sys/boot/i386/zfsboot/Makefile	(revision 304320)
+++ head/sys/boot/i386/zfsboot/Makefile	(revision 304321)
@@ -1,95 +1,98 @@
 # $FreeBSD$
 
-.PATH:		${.CURDIR}/../boot2 ${.CURDIR}/../common ${.CURDIR}/../../common
+.PATH:		${.CURDIR}/../boot2 ${.CURDIR}/../common \
+		${.CURDIR}/../../common ${.CURDIR}/../../../crypto/skein
 
 FILES=		zfsboot
 MAN=		zfsboot.8
 
 NM?=		nm
 
 BOOT_COMCONSOLE_PORT?= 0x3f8
 BOOT_COMCONSOLE_SPEED?= 9600
 B2SIOFMT?=	0x3
 
 REL1=	0x700
 ORG1=	0x7c00
 ORG2=	0x2000
 
 CFLAGS=	-DBOOTPROG=\"zfsboot\" \
 	-O1 \
 	-DBOOT2 \
 	-DSIOPRT=${BOOT_COMCONSOLE_PORT} \
 	-DSIOFMT=${B2SIOFMT} \
 	-DSIOSPD=${BOOT_COMCONSOLE_SPEED} \
 	-I${.CURDIR}/../../common \
 	-I${.CURDIR}/../common \
 	-I${.CURDIR}/../../zfs \
 	-I${.CURDIR}/../../../cddl/boot/zfs \
+	-I${.CURDIR}/../../../crypto/skein \
 	-I${.CURDIR}/../btx/lib -I. \
 	-I${.CURDIR}/../boot2 \
-	-Wall -Waggregate-return -Wbad-function-cast -Wcast-align \
+	-Wall -Waggregate-return -Wbad-function-cast -Wno-cast-align \
 	-Wmissing-declarations -Wmissing-prototypes -Wnested-externs \
 	-Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \
 	-Winline
 
 CFLAGS.gcc+=	--param max-inline-insns-single=100
 
 LD_FLAGS=-static -N --gc-sections
 
 LIBSTAND=	${.OBJDIR}/../../libstand32/libstand.a
 
 # Pick up ../Makefile.inc early.
 .include <bsd.init.mk>
 
 CLEANFILES=	zfsboot
 
 zfsboot: zfsboot1 zfsboot2
 	cat zfsboot1 zfsboot2 > zfsboot
 
 CLEANFILES+=	zfsboot1 zfsldr.out zfsldr.o
 
 zfsboot1: zfsldr.out
 	${OBJCOPY} -S -O binary zfsldr.out ${.TARGET}
 
 zfsldr.out: zfsldr.o
 	${LD} ${LD_FLAGS} -e start -Ttext ${ORG1} -o ${.TARGET} zfsldr.o
 
 CLEANFILES+=	zfsboot2 zfsboot.ld zfsboot.ldr zfsboot.bin zfsboot.out \
-		zfsboot.o zfsboot.s zfsboot.s.tmp sio.o cons.o drv.o util.o
+		zfsboot.o zfsboot.s zfsboot.s.tmp sio.o cons.o drv.o util.o \
+		skein.o skein_block.o
 
-# We currently allow 65536 bytes for zfsboot - in practice it could be
+# We currently allow 128kB for zfsboot - in practice it could be
 # any size up to 3.5Mb but keeping it fixed size simplifies zfsldr.
 # 
-BOOT2SIZE=	65536
+BOOT2SIZE=	131072
 
 zfsboot2: zfsboot.ld
 	@set -- `ls -l zfsboot.ld`; x=$$((${BOOT2SIZE}-$$5)); \
 	    echo "$$x bytes available"; test $$x -ge 0
 	dd if=zfsboot.ld of=${.TARGET} obs=${BOOT2SIZE} conv=osync status=none
 
 zfsboot.ld: zfsboot.ldr zfsboot.bin ${BTXKERN}
 	btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l zfsboot.ldr \
 	    -o ${.TARGET} -P 1 zfsboot.bin
 
 zfsboot.ldr:
 	cp /dev/null ${.TARGET}
 
 zfsboot.bin: zfsboot.out
 	${OBJCOPY} -S -O binary zfsboot.out ${.TARGET}
 
-zfsboot.out: ${BTXCRT} zfsboot.o sio.o drv.o cons.o util.o
+zfsboot.out: ${BTXCRT} zfsboot.o sio.o drv.o cons.o util.o skein.o skein_block.o
 	${LD} ${LD_FLAGS} -Ttext ${ORG2} -o ${.TARGET} ${.ALLSRC} ${LIBSTAND}
 
 SRCS=	zfsboot.c
 
 .if ${MACHINE_CPUARCH} == "amd64"
 beforedepend zfsboot.o: machine
 CLEANFILES+=	machine
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../../i386/include machine
 .endif
 
 .include <bsd.prog.mk>
 
 # XXX: clang integrated-as doesn't grok .codeNN directives yet
 CFLAGS.zfsldr.S=	${CLANG_NO_IAS}
Index: head/sys/boot/i386/zfsboot/zfsboot.c
===================================================================
--- head/sys/boot/i386/zfsboot/zfsboot.c	(revision 304320)
+++ head/sys/boot/i386/zfsboot/zfsboot.c	(revision 304321)
@@ -1,951 +1,951 @@
 /*-
  * Copyright (c) 1998 Robert Nordier
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms are freely
  * permitted provided that the above copyright notice and this
  * paragraph and the following disclaimer are duplicated in all
  * such forms.
  *
  * This software is provided "AS IS" and without any express or
  * implied warranties, including, without limitation, the implied
  * warranties of merchantability and fitness for a particular
  * purpose.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/errno.h>
 #include <sys/diskmbr.h>
 #ifdef GPT
 #include <sys/gpt.h>
 #endif
 #include <sys/reboot.h>
 #include <sys/queue.h>
 
 #include <machine/bootinfo.h>
 #include <machine/elf.h>
 #include <machine/pc/bios.h>
 
 #include <stdarg.h>
 #include <stddef.h>
 
 #include <a.out.h>
 
 #include <btxv86.h>
 
 #include "lib.h"
 #include "rbx.h"
 #include "drv.h"
 #include "util.h"
 #include "cons.h"
 #include "bootargs.h"
 #include "paths.h"
 
 #include "libzfs.h"
 
 #define ARGS			0x900
 #define NOPT			14
 #define NDEV			3
 
 #define BIOS_NUMDRIVES		0x475
 #define DRV_HARD		0x80
 #define DRV_MASK		0x7f
 
 #define TYPE_AD			0
 #define TYPE_DA			1
 #define TYPE_MAXHARD		TYPE_DA
 #define TYPE_FD			2
 
 #define DEV_GELIBOOT_BSIZE	4096
 
 extern uint32_t _end;
 
 #ifdef GPT
 static const uuid_t freebsd_zfs_uuid = GPT_ENT_TYPE_FREEBSD_ZFS;
 #endif
 static const char optstr[NOPT] = "DhaCcdgmnpqrsv"; /* Also 'P', 'S' */
 static const unsigned char flags[NOPT] = {
     RBX_DUAL,
     RBX_SERIAL,
     RBX_ASKNAME,
     RBX_CDROM,
     RBX_CONFIG,
     RBX_KDB,
     RBX_GDB,
     RBX_MUTE,
     RBX_NOINTR,
     RBX_PAUSE,
     RBX_QUIET,
     RBX_DFLTROOT,
     RBX_SINGLE,
     RBX_VERBOSE
 };
 uint32_t opts;
 
 static const unsigned char dev_maj[NDEV] = {30, 4, 2};
 
 static char cmd[512];
 static char cmddup[512];
 static char kname[1024];
 static char rootname[256];
 static int comspeed = SIOSPD;
 static struct bootinfo bootinfo;
 static uint32_t bootdev;
 static struct zfs_boot_args zfsargs;
 static struct zfsmount zfsmount;
 
 vm_offset_t	high_heap_base;
 uint32_t	bios_basemem, bios_extmem, high_heap_size;
 
 static struct bios_smap smap;
 
 /*
  * The minimum amount of memory to reserve in bios_extmem for the heap.
  */
-#define	HEAP_MIN		(3 * 1024 * 1024)
+#define	HEAP_MIN		(64 * 1024 * 1024)
 
 static char *heap_next;
 static char *heap_end;
 
 /* Buffers that must not span a 64k boundary. */
 #define READ_BUF_SIZE		8192
 struct dmadat {
 	char rdbuf[READ_BUF_SIZE];	/* for reading large things */
 	char secbuf[READ_BUF_SIZE];	/* for MBR/disklabel */
 };
 static struct dmadat *dmadat;
 
 void exit(int);
 static void load(void);
 static int parse(void);
 static void bios_getmem(void);
 void *malloc(size_t n);
 void free(void *ptr);
 
 void *
 malloc(size_t n)
 {
 	char *p = heap_next;
 	if (p + n > heap_end) {
 		printf("malloc failure\n");
 		for (;;)
 		    ;
 		/* NOTREACHED */
 		return (0);
 	}
 	heap_next += n;
 	return (p);
 }
 
 void
 free(void *ptr)
 {
 
 	return;
 }
 
 static char *
 strdup(const char *s)
 {
 	char *p = malloc(strlen(s) + 1);
 	strcpy(p, s);
 	return (p);
 }
 
 #ifdef LOADER_GELI_SUPPORT
 #include "geliboot.c"
 static char gelipw[GELI_PW_MAXLEN];
 #endif
 
 #include "zfsimpl.c"
 
 /*
  * Read from a dnode (which must be from a ZPL filesystem).
  */
 static int
 zfs_read(spa_t *spa, const dnode_phys_t *dnode, off_t *offp, void *start, size_t size)
 {
 	const znode_phys_t *zp = (const znode_phys_t *) dnode->dn_bonus;
 	size_t n;
 	int rc;
 
 	n = size;
 	if (*offp + n > zp->zp_size)
 		n = zp->zp_size - *offp;
 	
 	rc = dnode_read(spa, dnode, *offp, start, n);
 	if (rc)
 		return (-1);
 	*offp += n;
 
 	return (n);
 }
 
 /*
  * Current ZFS pool
  */
 static spa_t *spa;
 static spa_t *primary_spa;
 static vdev_t *primary_vdev;
 
 /*
  * A wrapper for dskread that doesn't have to worry about whether the
  * buffer pointer crosses a 64k boundary.
  */
 static int
 vdev_read(vdev_t *vdev, void *priv, off_t off, void *buf, size_t bytes)
 {
 	char *p;
 	daddr_t lba, alignlba;
 	off_t diff;
 	unsigned int nb, alignnb;
 	struct dsk *dsk = (struct dsk *) priv;
 
 	if ((off & (DEV_BSIZE - 1)) || (bytes & (DEV_BSIZE - 1)))
 		return -1;
 
 	p = buf;
 	lba = off / DEV_BSIZE;
 	lba += dsk->start;
 	/*
 	 * Align reads to 4k else 4k sector GELIs will not decrypt.
 	 * Round LBA down to nearest multiple of DEV_GELIBOOT_BSIZE bytes.
 	 */
 	alignlba = rounddown2(off, DEV_GELIBOOT_BSIZE) / DEV_BSIZE;
 	/*
 	 * The read must be aligned to DEV_GELIBOOT_BSIZE bytes relative to the
 	 * start of the GELI partition, not the start of the actual disk.
 	 */
 	alignlba += dsk->start;
 	diff = (lba - alignlba) * DEV_BSIZE;
 
 	while (bytes > 0) {
 		nb = bytes / DEV_BSIZE;
 		/*
 		 * Ensure that the read size plus the leading offset does not
 		 * exceed the size of the read buffer.
 		 */
 		if (nb > (READ_BUF_SIZE - diff) / DEV_BSIZE)
 			nb = (READ_BUF_SIZE - diff) / DEV_BSIZE;
 		/*
 		 * Round the number of blocks to read up to the nearest multiple
 		 * of DEV_GELIBOOT_BSIZE.
 		 */
 		alignnb = roundup2(nb * DEV_BSIZE + diff, DEV_GELIBOOT_BSIZE)
 		    / DEV_BSIZE;
 
 		if (drvread(dsk, dmadat->rdbuf, alignlba, alignnb))
 			return -1;
 #ifdef LOADER_GELI_SUPPORT
 		/* decrypt */
 		if (is_geli(dsk) == 0) {
 			if (geli_read(dsk, ((alignlba - dsk->start) *
 			    DEV_BSIZE), dmadat->rdbuf, alignnb * DEV_BSIZE))
 				return (-1);
 		}
 #endif
 		memcpy(p, dmadat->rdbuf + diff, nb * DEV_BSIZE);
 		p += nb * DEV_BSIZE;
 		lba += nb;
 		alignlba += alignnb;
 		bytes -= nb * DEV_BSIZE;
 		/* Don't need the leading offset after the first block. */
 		diff = 0;
 	}
 
 	return 0;
 }
 
 static int
 xfsread(const dnode_phys_t *dnode, off_t *offp, void *buf, size_t nbyte)
 {
     if ((size_t)zfs_read(spa, dnode, offp, buf, nbyte) != nbyte) {
 	printf("Invalid format\n");
 	return -1;
     }
     return 0;
 }
 
 static void
 bios_getmem(void)
 {
     uint64_t size;
 
     /* Parse system memory map */
     v86.ebx = 0;
     do {
 	v86.ctl = V86_FLAGS;
 	v86.addr = 0x15;		/* int 0x15 function 0xe820*/
 	v86.eax = 0xe820;
 	v86.ecx = sizeof(struct bios_smap);
 	v86.edx = SMAP_SIG;
 	v86.es = VTOPSEG(&smap);
 	v86.edi = VTOPOFF(&smap);
 	v86int();
 	if (V86_CY(v86.efl) || (v86.eax != SMAP_SIG))
 	    break;
 	/* look for a low-memory segment that's large enough */
 	if ((smap.type == SMAP_TYPE_MEMORY) && (smap.base == 0) &&
 	    (smap.length >= (512 * 1024)))
 	    bios_basemem = smap.length;
 	/* look for the first segment in 'extended' memory */
 	if ((smap.type == SMAP_TYPE_MEMORY) && (smap.base == 0x100000)) {
 	    bios_extmem = smap.length;
 	}
 
 	/*
 	 * Look for the largest segment in 'extended' memory beyond
 	 * 1MB but below 4GB.
 	 */
 	if ((smap.type == SMAP_TYPE_MEMORY) && (smap.base > 0x100000) &&
 	    (smap.base < 0x100000000ull)) {
 	    size = smap.length;
 
 	    /*
 	     * If this segment crosses the 4GB boundary, truncate it.
 	     */
 	    if (smap.base + size > 0x100000000ull)
 		size = 0x100000000ull - smap.base;
 
 	    if (size > high_heap_size) {
 		high_heap_size = size;
 		high_heap_base = smap.base;
 	    }
 	}
     } while (v86.ebx != 0);
 
     /* Fall back to the old compatibility function for base memory */
     if (bios_basemem == 0) {
 	v86.ctl = 0;
 	v86.addr = 0x12;		/* int 0x12 */
 	v86int();
 	
 	bios_basemem = (v86.eax & 0xffff) * 1024;
     }
 
     /* Fall back through several compatibility functions for extended memory */
     if (bios_extmem == 0) {
 	v86.ctl = V86_FLAGS;
 	v86.addr = 0x15;		/* int 0x15 function 0xe801*/
 	v86.eax = 0xe801;
 	v86int();
 	if (!V86_CY(v86.efl)) {
 	    bios_extmem = ((v86.ecx & 0xffff) + ((v86.edx & 0xffff) * 64)) * 1024;
 	}
     }
     if (bios_extmem == 0) {
 	v86.ctl = 0;
 	v86.addr = 0x15;		/* int 0x15 function 0x88*/
 	v86.eax = 0x8800;
 	v86int();
 	bios_extmem = (v86.eax & 0xffff) * 1024;
     }
 
     /*
      * If we have extended memory and did not find a suitable heap
      * region in the SMAP, use the last 3MB of 'extended' memory as a
      * high heap candidate.
      */
     if (bios_extmem >= HEAP_MIN && high_heap_size < HEAP_MIN) {
 	high_heap_size = HEAP_MIN;
 	high_heap_base = bios_extmem + 0x100000 - HEAP_MIN;
     }
 }
 
 /*
  * Try to detect a device supported by the legacy int13 BIOS
  */
 static int
 int13probe(int drive)
 {
     v86.ctl = V86_FLAGS;
     v86.addr = 0x13;
     v86.eax = 0x800;
     v86.edx = drive;
     v86int();
     
     if (!V86_CY(v86.efl) &&				/* carry clear */
 	((v86.edx & 0xff) != (drive & DRV_MASK))) {	/* unit # OK */
 	if ((v86.ecx & 0x3f) == 0) {			/* absurd sector size */
 		return(0);				/* skip device */
 	}
 	return (1);
     }
     return(0);
 }
 
 /*
  * We call this when we find a ZFS vdev - ZFS consumes the dsk
  * structure so we must make a new one.
  */
 static struct dsk *
 copy_dsk(struct dsk *dsk)
 {
     struct dsk *newdsk;
 
     newdsk = malloc(sizeof(struct dsk));
     *newdsk = *dsk;
     return (newdsk);
 }
 
 static void
 probe_drive(struct dsk *dsk)
 {
 #ifdef GPT
     struct gpt_hdr hdr;
     struct gpt_ent *ent;
     unsigned part, entries_per_sec;
     daddr_t slba;
 #endif
 #if defined(GPT) || defined(LOADER_GELI_SUPPORT)
     daddr_t elba;
 #endif
 
     struct dos_partition *dp;
     char *sec;
     unsigned i;
 
     /*
      * If we find a vdev on the whole disk, stop here.
      */
     if (vdev_probe(vdev_read, dsk, NULL) == 0)
 	return;
 
 #ifdef LOADER_GELI_SUPPORT
     /*
      * Taste the disk, if it is GELI encrypted, decrypt it and check to see if
      * it is a usable vdev then. Otherwise dig
      * out the partition table and probe each slice/partition
      * in turn for a vdev or GELI encrypted vdev.
      */
     elba = drvsize(dsk);
     if (elba > 0) {
 	elba--;
     }
     if (geli_taste(vdev_read, dsk, elba) == 0) {
 	if (geli_passphrase(&gelipw, dsk->unit, ':', 0, dsk) == 0) {
 	    if (vdev_probe(vdev_read, dsk, NULL) == 0) {
 		return;
 	    }
 	}
     }
 #endif /* LOADER_GELI_SUPPORT */
 
     sec = dmadat->secbuf;
     dsk->start = 0;
 
 #ifdef GPT
     /*
      * First check for GPT.
      */
     if (drvread(dsk, sec, 1, 1)) {
 	return;
     }
     memcpy(&hdr, sec, sizeof(hdr));
     if (memcmp(hdr.hdr_sig, GPT_HDR_SIG, sizeof(hdr.hdr_sig)) != 0 ||
 	hdr.hdr_lba_self != 1 || hdr.hdr_revision < 0x00010000 ||
 	hdr.hdr_entsz < sizeof(*ent) || DEV_BSIZE % hdr.hdr_entsz != 0) {
 	goto trymbr;
     }
 
     /*
      * Probe all GPT partitions for the presence of ZFS pools. We
      * return the spa_t for the first we find (if requested). This
      * will have the effect of booting from the first pool on the
      * disk.
      *
      * If no vdev is found, GELI decrypting the device and try again
      */
     entries_per_sec = DEV_BSIZE / hdr.hdr_entsz;
     slba = hdr.hdr_lba_table;
     elba = slba + hdr.hdr_entries / entries_per_sec;
     while (slba < elba) {
 	dsk->start = 0;
 	if (drvread(dsk, sec, slba, 1))
 	    return;
 	for (part = 0; part < entries_per_sec; part++) {
 	    ent = (struct gpt_ent *)(sec + part * hdr.hdr_entsz);
 	    if (memcmp(&ent->ent_type, &freebsd_zfs_uuid,
 		     sizeof(uuid_t)) == 0) {
 		dsk->start = ent->ent_lba_start;
 		dsk->slice = part + 1;
 		dsk->part = 255;
 		if (vdev_probe(vdev_read, dsk, NULL) == 0) {
 		    /*
 		     * This slice had a vdev. We need a new dsk
 		     * structure now since the vdev now owns this one.
 		     */
 		    dsk = copy_dsk(dsk);
 		}
 #ifdef LOADER_GELI_SUPPORT
 		else if (geli_taste(vdev_read, dsk, ent->ent_lba_end -
 			 ent->ent_lba_start) == 0) {
 		    if (geli_passphrase(&gelipw, dsk->unit, 'p', dsk->slice, dsk) == 0) {
 			/*
 			 * This slice has GELI, check it for ZFS.
 			 */
 			if (vdev_probe(vdev_read, dsk, NULL) == 0) {
 			    /*
 			     * This slice had a vdev. We need a new dsk
 			     * structure now since the vdev now owns this one.
 			     */
 			    dsk = copy_dsk(dsk);
 			}
 			break;
 		    }
 		}
 #endif /* LOADER_GELI_SUPPORT */
 	    }
 	}
 	slba++;
     }
     return;
 trymbr:
 #endif /* GPT */
 
     if (drvread(dsk, sec, DOSBBSECTOR, 1))
 	return;
     dp = (void *)(sec + DOSPARTOFF);
 
     for (i = 0; i < NDOSPART; i++) {
 	if (!dp[i].dp_typ)
 	    continue;
 	dsk->start = dp[i].dp_start;
 	dsk->slice = i + 1;
 	if (vdev_probe(vdev_read, dsk, NULL) == 0) {
 	    dsk = copy_dsk(dsk);
 	}
 #ifdef LOADER_GELI_SUPPORT
 	else if (geli_taste(vdev_read, dsk, dp[i].dp_size -
 		 dp[i].dp_start) == 0) {
 	    if (geli_passphrase(&gelipw, dsk->unit, 's', i, dsk) == 0) {
 		/*
 		 * This slice has GELI, check it for ZFS.
 		 */
 		if (vdev_probe(vdev_read, dsk, NULL) == 0) {
 		    /*
 		     * This slice had a vdev. We need a new dsk
 		     * structure now since the vdev now owns this one.
 		     */
 		    dsk = copy_dsk(dsk);
 		}
 		break;
 	    }
 	}
 #endif /* LOADER_GELI_SUPPORT */
     }
 }
 
 int
 main(void)
 {
     int autoboot, i;
     dnode_phys_t dn;
     off_t off;
     struct dsk *dsk;
 
     dmadat = (void *)(roundup2(__base + (int32_t)&_end, 0x10000) - __base);
 
     bios_getmem();
 
     if (high_heap_size > 0) {
 	heap_end = PTOV(high_heap_base + high_heap_size);
 	heap_next = PTOV(high_heap_base);
     } else {
 	heap_next = (char *)dmadat + sizeof(*dmadat);
 	heap_end = (char *)PTOV(bios_basemem);
     }
 
     dsk = malloc(sizeof(struct dsk));
     dsk->drive = *(uint8_t *)PTOV(ARGS);
     dsk->type = dsk->drive & DRV_HARD ? TYPE_AD : TYPE_FD;
     dsk->unit = dsk->drive & DRV_MASK;
     dsk->slice = *(uint8_t *)PTOV(ARGS + 1) + 1;
     dsk->part = 0;
     dsk->start = 0;
     dsk->init = 0;
 
     bootinfo.bi_version = BOOTINFO_VERSION;
     bootinfo.bi_size = sizeof(bootinfo);
     bootinfo.bi_basemem = bios_basemem / 1024;
     bootinfo.bi_extmem = bios_extmem / 1024;
     bootinfo.bi_memsizes_valid++;
     bootinfo.bi_bios_dev = dsk->drive;
 
     bootdev = MAKEBOOTDEV(dev_maj[dsk->type],
 			  dsk->slice, dsk->unit, dsk->part);
 
     /* Process configuration file */
 
     autoboot = 1;
 
 #ifdef LOADER_GELI_SUPPORT
     geli_init();
 #endif
     zfs_init();
 
     /*
      * Probe the boot drive first - we will try to boot from whatever
      * pool we find on that drive.
      */
     probe_drive(dsk);
 
     /*
      * Probe the rest of the drives that the bios knows about. This
      * will find any other available pools and it may fill in missing
      * vdevs for the boot pool.
      */
 #ifndef VIRTUALBOX
     for (i = 0; i < *(unsigned char *)PTOV(BIOS_NUMDRIVES); i++)
 #else
     for (i = 0; i < MAXBDDEV; i++)
 #endif
     {
 	if ((i | DRV_HARD) == *(uint8_t *)PTOV(ARGS))
 	    continue;
 
 	if (!int13probe(i | DRV_HARD))
 	    break;
 
 	dsk = malloc(sizeof(struct dsk));
 	dsk->drive = i | DRV_HARD;
 	dsk->type = dsk->drive & TYPE_AD;
 	dsk->unit = i;
 	dsk->slice = 0;
 	dsk->part = 0;
 	dsk->start = 0;
 	dsk->init = 0;
 	probe_drive(dsk);
     }
 
     /*
      * The first discovered pool, if any, is the pool.
      */
     spa = spa_get_primary();
     if (!spa) {
 	printf("%s: No ZFS pools located, can't boot\n", BOOTPROG);
 	for (;;)
 	    ;
     }
 
     primary_spa = spa;
     primary_vdev = spa_get_primary_vdev(spa);
 
     if (zfs_spa_init(spa) != 0 || zfs_mount(spa, 0, &zfsmount) != 0) {
 	printf("%s: failed to mount default pool %s\n",
 	    BOOTPROG, spa->spa_name);
 	autoboot = 0;
     } else if (zfs_lookup(&zfsmount, PATH_CONFIG, &dn) == 0 ||
         zfs_lookup(&zfsmount, PATH_DOTCONFIG, &dn) == 0) {
 	off = 0;
 	zfs_read(spa, &dn, &off, cmd, sizeof(cmd));
     }
 
     if (*cmd) {
 	/*
 	 * Note that parse() is destructive to cmd[] and we also want
 	 * to honor RBX_QUIET option that could be present in cmd[].
 	 */
 	memcpy(cmddup, cmd, sizeof(cmd));
 	if (parse())
 	    autoboot = 0;
 	if (!OPT_CHECK(RBX_QUIET))
 	    printf("%s: %s\n", PATH_CONFIG, cmddup);
 	/* Do not process this command twice */
 	*cmd = 0;
     }
 
     /*
      * Try to exec /boot/loader. If interrupted by a keypress,
      * or in case of failure, try to load a kernel directly instead.
      */
 
     if (autoboot && !*kname) {
 	memcpy(kname, PATH_LOADER_ZFS, sizeof(PATH_LOADER_ZFS));
 	if (!keyhit(3)) {
 	    load();
 	    memcpy(kname, PATH_KERNEL, sizeof(PATH_KERNEL));
 	}
     }
 
     /* Present the user with the boot2 prompt. */
 
     for (;;) {
 	if (!autoboot || !OPT_CHECK(RBX_QUIET)) {
 	    printf("\nFreeBSD/x86 boot\n");
 	    if (zfs_rlookup(spa, zfsmount.rootobj, rootname) != 0)
 		printf("Default: %s/<0x%llx>:%s\n"
 		       "boot: ",
 		       spa->spa_name, zfsmount.rootobj, kname);
 	    else if (rootname[0] != '\0')
 		printf("Default: %s/%s:%s\n"
 		       "boot: ",
 		       spa->spa_name, rootname, kname);
 	    else
 		printf("Default: %s:%s\n"
 		       "boot: ",
 		       spa->spa_name, kname);
 	}
 	if (ioctrl & IO_SERIAL)
 	    sio_flush();
 	if (!autoboot || keyhit(5))
 	    getstr(cmd, sizeof(cmd));
 	else if (!autoboot || !OPT_CHECK(RBX_QUIET))
 	    putchar('\n');
 	autoboot = 0;
 	if (parse())
 	    putchar('\a');
 	else
 	    load();
     }
 }
 
 /* XXX - Needed for btxld to link the boot2 binary; do not remove. */
 void
 exit(int x)
 {
 }
 
 static void
 load(void)
 {
     union {
 	struct exec ex;
 	Elf32_Ehdr eh;
     } hdr;
     static Elf32_Phdr ep[2];
     static Elf32_Shdr es[2];
     caddr_t p;
     dnode_phys_t dn;
     off_t off;
     uint32_t addr, x;
     int fmt, i, j;
 
     if (zfs_lookup(&zfsmount, kname, &dn)) {
 	printf("\nCan't find %s\n", kname);
 	return;
     }
     off = 0;
     if (xfsread(&dn, &off, &hdr, sizeof(hdr)))
 	return;
     if (N_GETMAGIC(hdr.ex) == ZMAGIC)
 	fmt = 0;
     else if (IS_ELF(hdr.eh))
 	fmt = 1;
     else {
 	printf("Invalid %s\n", "format");
 	return;
     }
     if (fmt == 0) {
 	addr = hdr.ex.a_entry & 0xffffff;
 	p = PTOV(addr);
 	off = PAGE_SIZE;
 	if (xfsread(&dn, &off, p, hdr.ex.a_text))
 	    return;
 	p += roundup2(hdr.ex.a_text, PAGE_SIZE);
 	if (xfsread(&dn, &off, p, hdr.ex.a_data))
 	    return;
 	p += hdr.ex.a_data + roundup2(hdr.ex.a_bss, PAGE_SIZE);
 	bootinfo.bi_symtab = VTOP(p);
 	memcpy(p, &hdr.ex.a_syms, sizeof(hdr.ex.a_syms));
 	p += sizeof(hdr.ex.a_syms);
 	if (hdr.ex.a_syms) {
 	    if (xfsread(&dn, &off, p, hdr.ex.a_syms))
 		return;
 	    p += hdr.ex.a_syms;
 	    if (xfsread(&dn, &off, p, sizeof(int)))
 		return;
 	    x = *(uint32_t *)p;
 	    p += sizeof(int);
 	    x -= sizeof(int);
 	    if (xfsread(&dn, &off, p, x))
 		return;
 	    p += x;
 	}
     } else {
 	off = hdr.eh.e_phoff;
 	for (j = i = 0; i < hdr.eh.e_phnum && j < 2; i++) {
 	    if (xfsread(&dn, &off, ep + j, sizeof(ep[0])))
 		return;
 	    if (ep[j].p_type == PT_LOAD)
 		j++;
 	}
 	for (i = 0; i < 2; i++) {
 	    p = PTOV(ep[i].p_paddr & 0xffffff);
 	    off = ep[i].p_offset;
 	    if (xfsread(&dn, &off, p, ep[i].p_filesz))
 		return;
 	}
 	p += roundup2(ep[1].p_memsz, PAGE_SIZE);
 	bootinfo.bi_symtab = VTOP(p);
 	if (hdr.eh.e_shnum == hdr.eh.e_shstrndx + 3) {
 	    off = hdr.eh.e_shoff + sizeof(es[0]) *
 		(hdr.eh.e_shstrndx + 1);
 	    if (xfsread(&dn, &off, &es, sizeof(es)))
 		return;
 	    for (i = 0; i < 2; i++) {
 		memcpy(p, &es[i].sh_size, sizeof(es[i].sh_size));
 		p += sizeof(es[i].sh_size);
 		off = es[i].sh_offset;
 		if (xfsread(&dn, &off, p, es[i].sh_size))
 		    return;
 		p += es[i].sh_size;
 	    }
 	}
 	addr = hdr.eh.e_entry & 0xffffff;
     }
     bootinfo.bi_esymtab = VTOP(p);
     bootinfo.bi_kernelname = VTOP(kname);
     zfsargs.size = sizeof(zfsargs);
     zfsargs.pool = zfsmount.spa->spa_guid;
     zfsargs.root = zfsmount.rootobj;
     zfsargs.primary_pool = primary_spa->spa_guid;
 #ifdef LOADER_GELI_SUPPORT
     bcopy(gelipw, zfsargs.gelipw, sizeof(zfsargs.gelipw));
     bzero(gelipw, sizeof(gelipw));
 #else
     zfsargs.gelipw[0] = '\0';
 #endif
     if (primary_vdev != NULL)
 	zfsargs.primary_vdev = primary_vdev->v_guid;
     else
 	printf("failed to detect primary vdev\n");
     __exec((caddr_t)addr, RB_BOOTINFO | (opts & RBX_MASK),
 	   bootdev,
 	   KARGS_FLAGS_ZFS | KARGS_FLAGS_EXTARG,
 	   (uint32_t) spa->spa_guid,
 	   (uint32_t) (spa->spa_guid >> 32),
 	   VTOP(&bootinfo),
 	   zfsargs);
 }
 
 static int
 zfs_mount_ds(char *dsname)
 {
     uint64_t newroot;
     spa_t *newspa;
     char *q;
 
     q = strchr(dsname, '/');
     if (q)
 	*q++ = '\0';
     newspa = spa_find_by_name(dsname);
     if (newspa == NULL) {
 	printf("\nCan't find ZFS pool %s\n", dsname);
 	return -1;
     }
 
     if (zfs_spa_init(newspa))
 	return -1;
 
     newroot = 0;
     if (q) {
 	if (zfs_lookup_dataset(newspa, q, &newroot)) {
 	    printf("\nCan't find dataset %s in ZFS pool %s\n",
 		    q, newspa->spa_name);
 	    return -1;
 	}
     }
     if (zfs_mount(newspa, newroot, &zfsmount)) {
 	printf("\nCan't mount ZFS dataset\n");
 	return -1;
     }
     spa = newspa;
     return (0);
 }
 
 static int
 parse(void)
 {
     char *arg = cmd;
     char *ep, *p, *q;
     const char *cp;
     int c, i, j;
 
     while ((c = *arg++)) {
 	if (c == ' ' || c == '\t' || c == '\n')
 	    continue;
 	for (p = arg; *p && *p != '\n' && *p != ' ' && *p != '\t'; p++);
 	ep = p;
 	if (*p)
 	    *p++ = 0;
 	if (c == '-') {
 	    while ((c = *arg++)) {
 		if (c == 'P') {
 		    if (*(uint8_t *)PTOV(0x496) & 0x10) {
 			cp = "yes";
 		    } else {
 			opts |= OPT_SET(RBX_DUAL) | OPT_SET(RBX_SERIAL);
 			cp = "no";
 		    }
 		    printf("Keyboard: %s\n", cp);
 		    continue;
 		} else if (c == 'S') {
 		    j = 0;
 		    while ((unsigned int)(i = *arg++ - '0') <= 9)
 			j = j * 10 + i;
 		    if (j > 0 && i == -'0') {
 			comspeed = j;
 			break;
 		    }
 		    /* Fall through to error below ('S' not in optstr[]). */
 		}
 		for (i = 0; c != optstr[i]; i++)
 		    if (i == NOPT - 1)
 			return -1;
 		opts ^= OPT_SET(flags[i]);
 	    }
 	    ioctrl = OPT_CHECK(RBX_DUAL) ? (IO_SERIAL|IO_KEYBOARD) :
 		     OPT_CHECK(RBX_SERIAL) ? IO_SERIAL : IO_KEYBOARD;
 	    if (ioctrl & IO_SERIAL) {
 	        if (sio_init(115200 / comspeed) != 0)
 		    ioctrl &= ~IO_SERIAL;
 	    }
 	} if (c == '?') {
 	    dnode_phys_t dn;
 
 	    if (zfs_lookup(&zfsmount, arg, &dn) == 0) {
 		zap_list(spa, &dn);
 	    }
 	    return -1;
 	} else {
 	    arg--;
 
 	    /*
 	     * Report pool status if the comment is 'status'. Lets
 	     * hope no-one wants to load /status as a kernel.
 	     */
 	    if (!strcmp(arg, "status")) {
 		spa_all_status();
 		return -1;
 	    }
 
 	    /*
 	     * If there is "zfs:" prefix simply ignore it.
 	     */
 	    if (strncmp(arg, "zfs:", 4) == 0)
 		arg += 4;
 
 	    /*
 	     * If there is a colon, switch pools.
 	     */
 	    q = strchr(arg, ':');
 	    if (q) {
 		*q++ = '\0';
 		if (zfs_mount_ds(arg) != 0)
 		    return -1;
 		arg = q;
 	    }
 	    if ((i = ep - arg)) {
 		if ((size_t)i >= sizeof(kname))
 		    return -1;
 		memcpy(kname, arg, i + 1);
 	    }
 	}
 	arg = p;
     }
     return 0;
 }
Index: head/sys/boot/i386/zfsboot/zfsldr.S
===================================================================
--- head/sys/boot/i386/zfsboot/zfsldr.S	(revision 304320)
+++ head/sys/boot/i386/zfsboot/zfsldr.S	(revision 304321)
@@ -1,266 +1,283 @@
 /*
  * Copyright (c) 1998 Robert Nordier
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms are freely
  * permitted provided that the above copyright notice and this
  * paragraph and the following disclaimer are duplicated in all
  * such forms.
  *
  * This software is provided "AS IS" and without any express or
  * implied warranties, including, without limitation, the implied
  * warranties of merchantability and fitness for a particular
  * purpose.
  *
  * $FreeBSD$
  */
 
 /* Memory Locations */
 		.set MEM_ARG,0x900		# Arguments
 		.set MEM_ORG,0x7c00		# Origin
 		.set MEM_BUF,0x8000		# Load area
 		.set MEM_BTX,0x9000		# BTX start
 		.set MEM_JMP,0x9010		# BTX entry point
 		.set MEM_USR,0xa000		# Client start
 		.set BDA_BOOT,0x472		# Boot howto flag
 	
 /* Partition Constants */
 		.set PRT_OFF,0x1be		# Partition offset
 		.set PRT_NUM,0x4		# Partitions
 		.set PRT_BSD,0xa5		# Partition type
 
 /* Misc. Constants */
 		.set SIZ_PAG,0x1000		# Page size
 		.set SIZ_SEC,0x200		# Sector size
-
-		.set NSECT,0x80
+		.set COPY_BLKS,0x8		# Number of blocks
+						# to copy for boot2
+		.set COPY_BLK_SZ,0x8000		# Copy in 32k blocks; must be
+						# a multiple of 16 bytes
+		.set NSECT,(COPY_BLK_SZ / SIZ_SEC * COPY_BLKS)
 		.globl start
 		.code16
 
 /*
  * Load the rest of zfsboot2 and BTX up, copy the parts to the right locations,
  * and start it all up.
  */
 
 /*
  * Setup the segment registers to flat addressing (segment 0) and setup the
  * stack to end just below the start of our code.
  */
 start:		cld				# String ops inc
 		xor %cx,%cx			# Zero
 		mov %cx,%es			# Address
 		mov %cx,%ds			#  data
 		mov %cx,%ss			# Set up
 		mov $start,%sp			#  stack
 /*
  * Load the MBR and look for the first FreeBSD slice.  We use the fake
  * partition entry below that points to the MBR when we call read.
  * The first pass looks for the first active FreeBSD slice.  The
  * second pass looks for the first non-active FreeBSD slice if the
  * first one fails.
  */
 		call check_edd		 	# Make sure EDD works
 		mov $part4,%si			# Dummy partition
 		xor %eax,%eax			# Read MBR
 		movl $MEM_BUF,%ebx		#  from first
 		call read			#  sector
 		mov $0x1,%cx	 		# Two passes
 main.1: 	mov $MEM_BUF+PRT_OFF,%si	# Partition table
 		movb $0x1,%dh			# Partition
 main.2: 	cmpb $PRT_BSD,0x4(%si)		# Our partition type?
 		jne main.3			# No
 		jcxz main.5			# If second pass
 		testb $0x80,(%si)		# Active?
 		jnz main.5			# Yes
 main.3: 	add $0x10,%si	 		# Next entry
 		incb %dh			# Partition
 		cmpb $0x1+PRT_NUM,%dh		# In table?
 		jb main.2			# Yes
 		dec %cx				# Do two
 		jcxz main.1			#  passes
 /*
  * If we get here, we didn't find any FreeBSD slices at all, so print an
  * error message and die.
  */
 		mov $msg_part,%si		# Message
 		jmp error			# Error
 
 /*
  * Ok, we have a slice and drive in %dx now, so use that to locate and
  * load boot2.  %si references the start of the slice we are looking
- * for, so go ahead and load up the 128 sectors starting at sector 1024
- * (i.e. after the two vdev labels).  We don't have do anything fancy
- * here to allow for an extra copy of boot1 and a partition table
- * (compare to this section of the UFS bootstrap) so we just load it
- * all at 0x9000. The first part of boot2 is BTX, which wants to run
- * at 0x9000. The boot2.bin binary starts right after the end of BTX,
+ * for, so go ahead and load up the COPY_BLKS*COPY_BLK_SZ/SIZ_SEC sectors
+ * starting at sector 1024 (i.e. after the two vdev labels).  We don't
+ * have to do anything fancy here to allow for an extra copy of boot1 and
+ * a partition table (compare to this section of the UFS bootstrap) so we
+ * just load it all at 0x9000. The first part of boot2 is BTX, which wants
+ * to run at 0x9000. The boot2.bin binary starts right after the end of BTX,
  * so we have to figure out where the start of it is and then move the
  * binary to 0xc000.  Normally, BTX clients start at MEM_USR, or 0xa000,
  * but when we use btxld to create zfsboot2, we use an entry point of
  * 0x2000.  That entry point is relative to MEM_USR; thus boot2.bin
  * starts at 0xc000.
  *
  * The load area and the target area for the client overlap so we have
  * to use a decrementing string move. We also play segment register
  * games with the destination address for the move so that the client
  * can be larger than 16k (which would overflow the zero segment since
  * the client starts at 0xc000).
  */
 main.5: 	mov %dx,MEM_ARG			# Save args
 		mov $NSECT,%cx			# Sector count
 		movl $1024,%eax			# Offset to boot2
 		mov $MEM_BTX,%ebx		# Destination buffer
 main.6:		pushal				# Save params
 		call read			# Read disk
 		popal				# Restore
 		incl %eax			# Advance to
 		add $SIZ_SEC,%ebx		#  next sector
 		loop main.6			# If not last, read another
-		mov MEM_BTX+0xa,%bx		# Get BTX length
-		mov $NSECT*SIZ_SEC-1,%di	# Size of load area (less one)
-		mov %di,%si			# End of load area, 0x9000 rel
-		sub %bx,%di			# End of client, 0xc000 rel
-		mov %di,%cx			# Size of
-		inc %cx				#  client
-		mov $(MEM_BTX)>>4,%dx		# Segment
-		mov %dx,%ds			#   addressing 0x9000
-		mov $(MEM_USR+2*SIZ_PAG)>>4,%dx	# Segment
-		mov %dx,%es			#   addressing 0xc000
-		std				# Move with decrement
-		rep				# Relocate
-		movsb				#  client
+
+		mov $MEM_BTX,%bx		# BTX
+		mov 0xa(%bx),%si		# Get BTX length and set
+		add %bx,%si			#  %si to start of boot2
+		dec %si				# Set %ds:%si to point at the
+		mov %si,%ax			# last byte we want to copy
+		shr $4,%ax			# from boot2, with %si made as
+		add $(COPY_BLKS*COPY_BLK_SZ/16),%ax	# small as possible.
+		and $0xf,%si			#
+		mov %ax,%ds			#
+		mov $(MEM_USR+2*SIZ_PAG)/16,%ax # Set %es:(-1) to point at
+		add $(COPY_BLKS*COPY_BLK_SZ/16),%ax	# the last byte we
+		mov %ax,%es			# want to copy boot2 into.
+		mov $COPY_BLKS,%bx		# Copy COPY_BLKS 32k blocks
+copyloop:
+		add $COPY_BLK_SZ,%si		# Adjust %ds:%si to point at
+		mov %ds,%ax			# the end of the next 32k to
+		sub $COPY_BLK_SZ/16,%ax		# copy from boot2
+		mov %ax,%ds
+		mov $COPY_BLK_SZ-1,%di		# Adjust %es:%di to point at
+		mov %es,%ax			# the end of the next 32k into
+		sub $COPY_BLK_SZ/16,%ax		# which we want boot2 copied
+		mov %ax,%es
+		mov $COPY_BLK_SZ,%cx		# Copy 32k
+		std
+		rep movsb
+		dec %bx
+		jnz copyloop
+		mov %cx,%ds			# Reset %ds and %es
+		mov %cx,%es
 		cld				# Back to increment
-		xor %dx,%dx			# Back
-		mov %ds,%dx			#  to zero
-		mov %dx,%es			#  segment
 
 /*
  * Enable A20 so we can access memory above 1 meg.
  * Use the zero-valued %cx as a timeout for embedded hardware which do not
  * have a keyboard controller.
  */
 seta20: 	cli				# Disable interrupts
 seta20.1:	dec %cx				# Timeout?
 		jz seta20.3			# Yes
 		inb $0x64,%al			# Get status
 		testb $0x2,%al			# Busy?
 		jnz seta20.1			# Yes
 		movb $0xd1,%al			# Command: Write
 		outb %al,$0x64			#  output port
 seta20.2:	inb $0x64,%al			# Get status
 		testb $0x2,%al			# Busy?
 		jnz seta20.2			# Yes
 		movb $0xdf,%al			# Enable
 		outb %al,$0x60			#  A20
 seta20.3:	sti				# Enable interrupts
 
 		jmp start+MEM_JMP-MEM_ORG	# Start BTX
 
 
 /*
  * Read a sector from the disk.  Sets up an EDD packet on the stack
  * and passes it to read.  We assume that the destination address is
  * always segment-aligned.
  *
  * %eax		- int     - LBA to read in relative to partition start
  * %ebx		- ptr	  - destination address
  * %dl		- byte    - drive to read from
  * %si		- ptr     - MBR partition entry
  */
 read:		xor %ecx,%ecx			# Get
 		addl 0x8(%si),%eax		#  LBA
 		adc $0,%ecx
 		pushl %ecx			# Starting absolute block
 		pushl %eax			#  block number
 		shr $4,%ebx			# Convert to segment
 		push %bx			# Address of
 		push $0				#  transfer buffer
 		push $0x1			# Read 1 sector
 		push $0x10			# Size of packet
 		mov %sp,%si			# Packet pointer
 		mov $0x42,%ah			# BIOS: Extended
 		int $0x13			#  read
 		jc read.1			# If error, fail
 		lea 0x10(%si),%sp		# Clear stack
 		ret				# If success, return
 read.1:		mov %ah,%al			# Format
 		mov $read_err,%di		#  error
 		call hex8			#  code
 		mov $msg_read,%si		# Set the error message and
 						#  fall through to the error
 						#  routine
 /*
  * Print out the error message pointed to by %ds:(%si) followed
  * by a prompt, wait for a keypress, and then reboot the machine.
  */
 error:		callw putstr			# Display message
 		mov $prompt,%si			# Display
 		callw putstr			#  prompt
 		xorb %ah,%ah			# BIOS: Get
 		int $0x16			#  keypress
 		movw $0x1234, BDA_BOOT		# Do a warm boot
 		ljmp $0xffff,$0x0		# reboot the machine
 /*
  * Display a null-terminated string using the BIOS output.
  */
 putstr.0:	mov $0x7,%bx	 		# Page:attribute
 		movb $0xe,%ah			# BIOS: Display
 		int $0x10			#  character
 putstr: 	lodsb				# Get char
 		testb %al,%al			# End of string?
 		jne putstr.0			# No
 		ret				# To caller
 /*
  * Check to see if the disk supports EDD.  zfsboot requires EDD and does not
  * support older C/H/S disk I/O.
  */
 check_edd:	cmpb $0x80,%dl			# Hard drive?
 		jb check_edd.1 			# No, fail to boot
 		mov $0x55aa,%bx			# Magic
 		push %dx			# Save
 		movb $0x41,%ah			# BIOS: Check
 		int $0x13			#  extensions present
 		pop %dx				# Restore
 		jc check_edd.1			# If error, fail
 		cmp $0xaa55,%bx			# Magic?
 		jne check_edd.1			# No, so fail
 		testb $0x1,%cl			# Packet interface?
 		jz check_edd.1			# No, so fail
 		ret				# EDD ok, keep booting
 check_edd.1:	mov $msg_chs,%si		# Warn that CHS is
 		jmp error			#  unsupported and fail
 /*
  * AL to hex, saving the result to [EDI].
  */
 hex8:		push %ax			# Save
 		shrb $0x4,%al			# Do upper
 		call hex8.1			#  4
 		pop %ax				# Restore
 hex8.1: 	andb $0xf,%al			# Get lower 4
 		cmpb $0xa,%al			# Convert
 		sbbb $0x69,%al			#  to hex
 		das				#  digit
 		orb $0x20,%al			# To lower case
 		stosb				# Save char
 		ret				# (Recursive)
 
 /* Messages */
 
 msg_chs:	.asciz "CHS not supported"
 msg_read:	.ascii "Read error: "
 read_err:	.asciz "XX"
 msg_part:	.asciz "Boot error"
 
 prompt: 	.asciz "\r\n"
 
 		.org PRT_OFF,0x90
 
 /* Partition table */
 
 		.fill 0x30,0x1,0x0
 part4:		.byte 0x80, 0x00, 0x01, 0x00
 		.byte 0xa5, 0xfe, 0xff, 0xff
 		.byte 0x00, 0x00, 0x00, 0x00
 		.byte 0x50, 0xc3, 0x00, 0x00	# 50000 sectors long, bleh
 
 		.word 0xaa55			# Magic number
Index: head/sys/boot/userboot/ficl/Makefile
===================================================================
--- head/sys/boot/userboot/ficl/Makefile	(revision 304320)
+++ head/sys/boot/userboot/ficl/Makefile	(revision 304321)
@@ -1,60 +1,63 @@
 # $FreeBSD$
 #
 .include <bsd.own.mk>
 MK_SSP=		no
 
 .PATH: ${.CURDIR}/../../ficl
 .PATH: ${.CURDIR}/../../ficl/${MACHINE_CPUARCH}
 BASE_SRCS=	dict.c ficl.c fileaccess.c float.c loader.c math64.c \
 		prefix.c search.c stack.c tools.c vm.c words.c
 
 SRCS=		${BASE_SRCS} sysdep.c softcore.c
 CLEANFILES=	softcore.c testmain testmain.o
+
+CWARNFLAGS.loader.c += -Wno-implicit-function-declaration
+
 .if HAVE_PNP
 CFLAGS+=	-DHAVE_PNP
 .endif
 .include <bsd.stand.mk>
 .ifmake testmain
 CFLAGS+=	-DTESTMAIN -D_TESTMAIN
 SRCS+=		testmain.c
 PROG=		testmain
 .include <bsd.prog.mk>
 .else
 LIB=		ficl
 INTERNALLIB=
 .include <bsd.lib.mk>
 .endif
 
 # Standard softwords
 .PATH: ${.CURDIR}/../../ficl/softwords
 SOFTWORDS=	softcore.fr jhlocal.fr marker.fr freebsd.fr ficllocal.fr \
 		ifbrack.fr
 # Optional OO extension softwords
 #SOFTWORDS+=	oo.fr classes.fr
 
 #.if ${MACHINE_CPUARCH} == "amd64"
 #CFLAGS+=	-m32 -I.
 #.endif
 
 .if ${MACHINE_ARCH} == "powerpc64"
 CFLAGS+=	-m32 -mcpu=powerpc -I.
 .endif
 
 CFLAGS+=	-I${.CURDIR}/../../ficl
 CFLAGS+=	-I${.CURDIR}/../../ficl/${MACHINE_CPUARCH}
 CFLAGS+=	-I${.CURDIR}/../../common
 
 softcore.c: ${SOFTWORDS} softcore.awk
 	(cd ${.CURDIR}/../../ficl/softwords; cat ${SOFTWORDS} \
 	    | awk -f softcore.awk -v datestamp="`LC_ALL=C date`") > ${.TARGET}
 
 #.if ${MACHINE_CPUARCH} == "amd64"
 #${SRCS:M*.c:R:S/$/.o/g}: machine
 #
 #beforedepend ${OBJS}: machine
 #
 #machine: .NOMETA
 #	ln -sf ${.CURDIR}/../../i386/include machine
 #
 #CLEANFILES+=	machine
 #.endif
Index: head/sys/boot/userboot/userboot/Makefile
===================================================================
--- head/sys/boot/userboot/userboot/Makefile	(revision 304320)
+++ head/sys/boot/userboot/userboot/Makefile	(revision 304321)
@@ -1,68 +1,70 @@
 # $FreeBSD$
 
 MAN=
 
 .include <src.opts.mk>
 MK_SSP=		no
 
 SHLIB_NAME=	userboot.so
 MK_CTF=		no
 STRIP=
 LIBDIR=		/boot
 
 SRCS=		autoload.c
 SRCS+=		bcache.c
 SRCS+=		biossmap.c
 SRCS+=		bootinfo.c
 SRCS+=		bootinfo32.c
 SRCS+=		bootinfo64.c
 SRCS+=		conf.c
 SRCS+=		console.c
 SRCS+=		copy.c
 SRCS+=		devicename.c
 SRCS+=		elf32_freebsd.c
 SRCS+=		elf64_freebsd.c
 SRCS+=		host.c
 SRCS+=		main.c
 SRCS+=		userboot_cons.c
 SRCS+=		userboot_disk.c
 SRCS+=		vers.c
 
 CFLAGS+=	-Wall
 CFLAGS+=	-I${.CURDIR}/..
 CFLAGS+=	-I${.CURDIR}/../../common
 CFLAGS+=	-I${.CURDIR}/../../..
 CFLAGS+=	-I${.CURDIR}/../../../../lib/libstand
 CFLAGS+=	-ffreestanding -I.
 
+CWARNFLAGS.main.c += -Wno-implicit-function-declaration
+
 LDFLAGS+=	-nostdlib -Wl,-Bsymbolic
 
 NEWVERSWHAT=	"User boot" ${MACHINE_CPUARCH}
 
 vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version
 	sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT}
 
 CLEANFILES=	vers.c
 
 .if ${MK_FORTH} != "no"
 BOOT_FORTH=	yes
 CFLAGS+=        -DBOOT_FORTH -I${.CURDIR}/../../ficl -I${.CURDIR}/../../ficl/i386
 CFLAGS+=	-DBF_DICTSIZE=15000
 LIBFICL=	${.OBJDIR}/../ficl/libficl.a
 LIBSTAND=	${.OBJDIR}/../libstand/libstand.a
 .endif
 
 .if ${MK_ZFS} != "no"
 CFLAGS+=	-DUSERBOOT_ZFS_SUPPORT
 LIBZFSBOOT=	${.OBJDIR}/../zfs/libzfsboot.a
 .endif
 
 # Always add MI sources 
 .PATH:		${.CURDIR}/../../common
 .include	"${.CURDIR}/../../common/Makefile.inc"
 CFLAGS+=	-I${.CURDIR}/../../common
 CFLAGS+=	-I.
 DPADD+=		${LIBFICL} ${LIBZFSBOOT} ${LIBSTAND} 
 LDADD+=		${LIBFICL} ${LIBZFSBOOT} ${LIBSTAND}
 
 .include <bsd.lib.mk>
Index: head/sys/boot/userboot/zfs/Makefile
===================================================================
--- head/sys/boot/userboot/zfs/Makefile	(revision 304320)
+++ head/sys/boot/userboot/zfs/Makefile	(revision 304321)
@@ -1,18 +1,19 @@
 # $FreeBSD$
 
 S=		${.CURDIR}/../../zfs
 
-.PATH:		${S}
+.PATH:		${S} ${.CURDIR}/../../../crypto/skein
 LIB=		zfsboot
 INTERNALLIB=
 
-SRCS+=		zfs.c
+SRCS+=		zfs.c skein.c skein_block.c
 
 CFLAGS+=	-I${.CURDIR}/../../common -I${.CURDIR}/../../.. -I.
 CFLAGS+=	-I${.CURDIR}/../../../../lib/libstand
 CFLAGS+=	-I${.CURDIR}/../../../cddl/boot/zfs
+CFLAGS+=	-I${.CURDIR}/../../../crypto/skein
 
 CFLAGS+=	-ffreestanding -fPIC
 CFLAGS+=	-Wformat -Wall
 
 .include <bsd.lib.mk>
Index: head/sys/boot/zfs/Makefile
===================================================================
--- head/sys/boot/zfs/Makefile	(revision 304320)
+++ head/sys/boot/zfs/Makefile	(revision 304321)
@@ -1,35 +1,39 @@
 # $FreeBSD$
 
 LIB=		zfsboot
 INTERNALLIB=
 
 SRCS+=		zfs.c
 
+SRCS+=		skein.c skein_block.c
+.PATH:		${.CURDIR}/../../crypto/skein
+
 CFLAGS+=	-DBOOTPROG=\"zfsloader\"
 CFLAGS+=	-I${.CURDIR}/../common -I${.CURDIR}/../.. -I.
 CFLAGS+=	-I${.CURDIR}/../../../lib/libstand
 CFLAGS+=	-I${.CURDIR}/../../cddl/boot/zfs
+CFLAGS+=	-I${.CURDIR}/../../crypto/skein
 
 .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64"
 CFLAGS+=	-march=i386
 .endif
 .if ${MACHINE_CPUARCH} == "amd64"
 CFLAGS+=	-m32
 .endif
 
 CFLAGS+=	-Wformat -Wall
 
 .if ${MACHINE_CPUARCH} == "amd64"
 CLEANFILES+=    machine
 machine: .NOMETA
 	ln -sf ${.CURDIR}/../../i386/include machine
 .endif
 
 .include <bsd.stand.mk>
 .include <bsd.lib.mk>
 
 .if ${MACHINE_CPUARCH} == "amd64"
 .if !exists(machine)
 beforedepend ${OBJS}: machine
 .endif
 .endif
Index: head/sys/boot/zfs/zfsimpl.c
===================================================================
--- head/sys/boot/zfs/zfsimpl.c	(revision 304320)
+++ head/sys/boot/zfs/zfsimpl.c	(revision 304321)
@@ -1,2267 +1,2354 @@
 /*-
  * Copyright (c) 2007 Doug Rabson
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 /*
  *	Stand-alone ZFS file reader.
  */
 
 #include <sys/stat.h>
 #include <sys/stdint.h>
 
 #include "zfsimpl.h"
 #include "zfssubr.c"
 
 
 struct zfsmount {
 	const spa_t	*spa;
 	objset_phys_t	objset;
 	uint64_t	rootobj;
 };
 
 /*
  * List of all vdevs, chained through v_alllink.
  */
 static vdev_list_t zfs_vdevs;
 
  /*
  * List of ZFS features supported for read
  */
 static const char *features_for_read[] = {
 	"org.illumos:lz4_compress",
 	"com.delphix:hole_birth",
 	"com.delphix:extensible_dataset",
 	"com.delphix:embedded_data",
 	"org.open-zfs:large_blocks",
+	"org.illumos:sha512",
+	"org.illumos:skein",
 	NULL
 };
 
 /*
  * List of all pools, chained through spa_link.
  */
 static spa_list_t zfs_pools;
 
 static uint64_t zfs_crc64_table[256];
 static const dnode_phys_t *dnode_cache_obj = 0;
 static uint64_t dnode_cache_bn;
 static char *dnode_cache_buf;
 static char *zap_scratch;
 static char *zfs_temp_buf, *zfs_temp_end, *zfs_temp_ptr;
 
 #define TEMP_SIZE	(1024 * 1024)
 
 static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
 static int zfs_get_root(const spa_t *spa, uint64_t *objid);
 static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
+static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
+    const char *name, uint64_t integer_size, uint64_t num_integers,
+    void *value);
 
 static void
 zfs_init(void)
 {
 	STAILQ_INIT(&zfs_vdevs);
 	STAILQ_INIT(&zfs_pools);
 
 	zfs_temp_buf = malloc(TEMP_SIZE);
 	zfs_temp_end = zfs_temp_buf + TEMP_SIZE;
 	zfs_temp_ptr = zfs_temp_buf;
 	dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
 	zap_scratch = malloc(SPA_MAXBLOCKSIZE);
 
 	zfs_init_crc();
 }
 
 static void *
 zfs_alloc(size_t size)
 {
 	char *ptr;
 
 	if (zfs_temp_ptr + size > zfs_temp_end) {
 		printf("ZFS: out of temporary buffer space\n");
 		for (;;) ;
 	}
 	ptr = zfs_temp_ptr;
 	zfs_temp_ptr += size;
 
 	return (ptr);
 }
 
 static void
 zfs_free(void *ptr, size_t size)
 {
 
 	zfs_temp_ptr -= size;
 	if (zfs_temp_ptr != ptr) {
 		printf("ZFS: zfs_alloc()/zfs_free() mismatch\n");
 		for (;;) ;
 	}
 }
 
 static int
 xdr_int(const unsigned char **xdr, int *ip)
 {
 	*ip = ((*xdr)[0] << 24)
 		| ((*xdr)[1] << 16)
 		| ((*xdr)[2] << 8)
 		| ((*xdr)[3] << 0);
 	(*xdr) += 4;
 	return (0);
 }
 
 static int
 xdr_u_int(const unsigned char **xdr, u_int *ip)
 {
 	*ip = ((*xdr)[0] << 24)
 		| ((*xdr)[1] << 16)
 		| ((*xdr)[2] << 8)
 		| ((*xdr)[3] << 0);
 	(*xdr) += 4;
 	return (0);
 }
 
 static int
 xdr_uint64_t(const unsigned char **xdr, uint64_t *lp)
 {
 	u_int hi, lo;
 
 	xdr_u_int(xdr, &hi);
 	xdr_u_int(xdr, &lo);
 	*lp = (((uint64_t) hi) << 32) | lo;
 	return (0);
 }
 
 static int
 nvlist_find(const unsigned char *nvlist, const char *name, int type,
 	    int* elementsp, void *valuep)
 {
 	const unsigned char *p, *pair;
 	int junk;
 	int encoded_size, decoded_size;
 
 	p = nvlist;
 	xdr_int(&p, &junk);
 	xdr_int(&p, &junk);
 
 	pair = p;
 	xdr_int(&p, &encoded_size);
 	xdr_int(&p, &decoded_size);
 	while (encoded_size && decoded_size) {
 		int namelen, pairtype, elements;
 		const char *pairname;
 
 		xdr_int(&p, &namelen);
 		pairname = (const char*) p;
 		p += roundup(namelen, 4);
 		xdr_int(&p, &pairtype);
 
 		if (!memcmp(name, pairname, namelen) && type == pairtype) {
 			xdr_int(&p, &elements);
 			if (elementsp)
 				*elementsp = elements;
 			if (type == DATA_TYPE_UINT64) {
 				xdr_uint64_t(&p, (uint64_t *) valuep);
 				return (0);
 			} else if (type == DATA_TYPE_STRING) {
 				int len;
 				xdr_int(&p, &len);
 				(*(const char**) valuep) = (const char*) p;
 				return (0);
 			} else if (type == DATA_TYPE_NVLIST
 				   || type == DATA_TYPE_NVLIST_ARRAY) {
 				(*(const unsigned char**) valuep) =
 					 (const unsigned char*) p;
 				return (0);
 			} else {
 				return (EIO);
 			}
 		} else {
 			/*
 			 * Not the pair we are looking for, skip to the next one.
 			 */
 			p = pair + encoded_size;
 		}
 
 		pair = p;
 		xdr_int(&p, &encoded_size);
 		xdr_int(&p, &decoded_size);
 	}
 
 	return (EIO);
 }
 
 static int
 nvlist_check_features_for_read(const unsigned char *nvlist)
 {
 	const unsigned char *p, *pair;
 	int junk;
 	int encoded_size, decoded_size;
 	int rc;
 
 	rc = 0;
 
 	p = nvlist;
 	xdr_int(&p, &junk);
 	xdr_int(&p, &junk);
 
 	pair = p;
 	xdr_int(&p, &encoded_size);
 	xdr_int(&p, &decoded_size);
 	while (encoded_size && decoded_size) {
 		int namelen, pairtype;
 		const char *pairname;
 		int i, found;
 
 		found = 0;
 
 		xdr_int(&p, &namelen);
 		pairname = (const char*) p;
 		p += roundup(namelen, 4);
 		xdr_int(&p, &pairtype);
 
 		for (i = 0; features_for_read[i] != NULL; i++) {
 			if (!memcmp(pairname, features_for_read[i], namelen)) {
 				found = 1;
 				break;
 			}
 		}
 
 		if (!found) {
 			printf("ZFS: unsupported feature: %s\n", pairname);
 			rc = EIO;
 		}
 
 		p = pair + encoded_size;
 
 		pair = p;
 		xdr_int(&p, &encoded_size);
 		xdr_int(&p, &decoded_size);
 	}
 
 	return (rc);
 }
 
 /*
  * Return the next nvlist in an nvlist array.
  */
 static const unsigned char *
 nvlist_next(const unsigned char *nvlist)
 {
 	const unsigned char *p, *pair;
 	int junk;
 	int encoded_size, decoded_size;
 
 	p = nvlist;
 	xdr_int(&p, &junk);
 	xdr_int(&p, &junk);
 
 	pair = p;
 	xdr_int(&p, &encoded_size);
 	xdr_int(&p, &decoded_size);
 	while (encoded_size && decoded_size) {
 		p = pair + encoded_size;
 
 		pair = p;
 		xdr_int(&p, &encoded_size);
 		xdr_int(&p, &decoded_size);
 	}
 
 	return p;
 }
 
 #ifdef TEST
 
 static const unsigned char *
 nvlist_print(const unsigned char *nvlist, unsigned int indent)
 {
 	static const char* typenames[] = {
 		"DATA_TYPE_UNKNOWN",
 		"DATA_TYPE_BOOLEAN",
 		"DATA_TYPE_BYTE",
 		"DATA_TYPE_INT16",
 		"DATA_TYPE_UINT16",
 		"DATA_TYPE_INT32",
 		"DATA_TYPE_UINT32",
 		"DATA_TYPE_INT64",
 		"DATA_TYPE_UINT64",
 		"DATA_TYPE_STRING",
 		"DATA_TYPE_BYTE_ARRAY",
 		"DATA_TYPE_INT16_ARRAY",
 		"DATA_TYPE_UINT16_ARRAY",
 		"DATA_TYPE_INT32_ARRAY",
 		"DATA_TYPE_UINT32_ARRAY",
 		"DATA_TYPE_INT64_ARRAY",
 		"DATA_TYPE_UINT64_ARRAY",
 		"DATA_TYPE_STRING_ARRAY",
 		"DATA_TYPE_HRTIME",
 		"DATA_TYPE_NVLIST",
 		"DATA_TYPE_NVLIST_ARRAY",
 		"DATA_TYPE_BOOLEAN_VALUE",
 		"DATA_TYPE_INT8",
 		"DATA_TYPE_UINT8",
 		"DATA_TYPE_BOOLEAN_ARRAY",
 		"DATA_TYPE_INT8_ARRAY",
 		"DATA_TYPE_UINT8_ARRAY"
 	};
 
 	unsigned int i, j;
 	const unsigned char *p, *pair;
 	int junk;
 	int encoded_size, decoded_size;
 
 	p = nvlist;
 	xdr_int(&p, &junk);
 	xdr_int(&p, &junk);
 
 	pair = p;
 	xdr_int(&p, &encoded_size);
 	xdr_int(&p, &decoded_size);
 	while (encoded_size && decoded_size) {
 		int namelen, pairtype, elements;
 		const char *pairname;
 
 		xdr_int(&p, &namelen);
 		pairname = (const char*) p;
 		p += roundup(namelen, 4);
 		xdr_int(&p, &pairtype);
 
 		for (i = 0; i < indent; i++)
 			printf(" ");
 		printf("%s %s", typenames[pairtype], pairname);
 
 		xdr_int(&p, &elements);
 		switch (pairtype) {
 		case DATA_TYPE_UINT64: {
 			uint64_t val;
 			xdr_uint64_t(&p, &val);
 			printf(" = 0x%jx\n", (uintmax_t)val);
 			break;
 		}
 
 		case DATA_TYPE_STRING: {
 			int len;
 			xdr_int(&p, &len);
 			printf(" = \"%s\"\n", p);
 			break;
 		}
 
 		case DATA_TYPE_NVLIST:
 			printf("\n");
 			nvlist_print(p, indent + 1);
 			break;
 
 		case DATA_TYPE_NVLIST_ARRAY:
 			for (j = 0; j < elements; j++) {
 				printf("[%d]\n", j);
 				p = nvlist_print(p, indent + 1);
 				if (j != elements - 1) {
 					for (i = 0; i < indent; i++)
 						printf(" ");
 					printf("%s %s", typenames[pairtype], pairname);
 				}
 			}
 			break;
 
 		default:
 			printf("\n");
 		}
 
 		p = pair + encoded_size;
 
 		pair = p;
 		xdr_int(&p, &encoded_size);
 		xdr_int(&p, &decoded_size);
 	}
 
 	return p;
 }
 
 #endif
 
 static int
 vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
     off_t offset, size_t size)
 {
 	size_t psize;
 	int rc;
 
 	if (!vdev->v_phys_read)
 		return (EIO);
 
 	if (bp) {
 		psize = BP_GET_PSIZE(bp);
 	} else {
 		psize = size;
 	}
 
 	/*printf("ZFS: reading %d bytes at 0x%jx to %p\n", psize, (uintmax_t)offset, buf);*/
 	rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
 	if (rc)
 		return (rc);
-	if (bp && zio_checksum_verify(bp, buf))
+	if (bp && zio_checksum_verify(vdev->spa, bp, buf))
 		return (EIO);
 
 	return (0);
 }
 
 static int
 vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
     off_t offset, size_t bytes)
 {
 
 	return (vdev_read_phys(vdev, bp, buf,
 		offset + VDEV_LABEL_START_SIZE, bytes));
 }
 
 
 static int
 vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
     off_t offset, size_t bytes)
 {
 	vdev_t *kid;
 	int rc;
 
 	rc = EIO;
 	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
 		if (kid->v_state != VDEV_STATE_HEALTHY)
 			continue;
 		rc = kid->v_read(kid, bp, buf, offset, bytes);
 		if (!rc)
 			return (0);
 	}
 
 	return (rc);
 }
 
 static int
 vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
     off_t offset, size_t bytes)
 {
 	vdev_t *kid;
 
 	/*
 	 * Here we should have two kids:
 	 * First one which is the one we are replacing and we can trust
 	 * only this one to have valid data, but it might not be present.
 	 * Second one is that one we are replacing with. It is most likely
 	 * healthy, but we can't trust it has needed data, so we won't use it.
 	 */
 	kid = STAILQ_FIRST(&vdev->v_children);
 	if (kid == NULL)
 		return (EIO);
 	if (kid->v_state != VDEV_STATE_HEALTHY)
 		return (EIO);
 	return (kid->v_read(kid, bp, buf, offset, bytes));
 }
 
 static vdev_t *
 vdev_find(uint64_t guid)
 {
 	vdev_t *vdev;
 
 	STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
 		if (vdev->v_guid == guid)
 			return (vdev);
 
 	return (0);
 }
 
 static vdev_t *
 vdev_create(uint64_t guid, vdev_read_t *read)
 {
 	vdev_t *vdev;
 
 	vdev = malloc(sizeof(vdev_t));
 	memset(vdev, 0, sizeof(vdev_t));
 	STAILQ_INIT(&vdev->v_children);
 	vdev->v_guid = guid;
 	vdev->v_state = VDEV_STATE_OFFLINE;
 	vdev->v_read = read;
 	vdev->v_phys_read = 0;
 	vdev->v_read_priv = 0;
 	STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
 
 	return (vdev);
 }
 
 static int
 vdev_init_from_nvlist(const unsigned char *nvlist, vdev_t *pvdev,
     vdev_t **vdevp, int is_newer)
 {
 	int rc;
 	uint64_t guid, id, ashift, nparity;
 	const char *type;
 	const char *path;
 	vdev_t *vdev, *kid;
 	const unsigned char *kids;
 	int nkids, i, is_new;
 	uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
 
 	if (nvlist_find(nvlist, ZPOOL_CONFIG_GUID,
 			DATA_TYPE_UINT64, 0, &guid)
 	    || nvlist_find(nvlist, ZPOOL_CONFIG_ID,
 			   DATA_TYPE_UINT64, 0, &id)
 	    || nvlist_find(nvlist, ZPOOL_CONFIG_TYPE,
 			   DATA_TYPE_STRING, 0, &type)) {
 		printf("ZFS: can't find vdev details\n");
 		return (ENOENT);
 	}
 
 	if (strcmp(type, VDEV_TYPE_MIRROR)
 	    && strcmp(type, VDEV_TYPE_DISK)
 #ifdef ZFS_TEST
 	    && strcmp(type, VDEV_TYPE_FILE)
 #endif
 	    && strcmp(type, VDEV_TYPE_RAIDZ)
 	    && strcmp(type, VDEV_TYPE_REPLACING)) {
 		printf("ZFS: can only boot from disk, mirror, raidz1, raidz2 and raidz3 vdevs\n");
 		return (EIO);
 	}
 
 	is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
 
 	nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, 0,
 			&is_offline);
 	nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, 0,
 			&is_removed);
 	nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, 0,
 			&is_faulted);
 	nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64, 0,
 			&is_degraded);
 	nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64, 0,
 			&isnt_present);
 
 	vdev = vdev_find(guid);
 	if (!vdev) {
 		is_new = 1;
 
 		if (!strcmp(type, VDEV_TYPE_MIRROR))
 			vdev = vdev_create(guid, vdev_mirror_read);
 		else if (!strcmp(type, VDEV_TYPE_RAIDZ))
 			vdev = vdev_create(guid, vdev_raidz_read);
 		else if (!strcmp(type, VDEV_TYPE_REPLACING))
 			vdev = vdev_create(guid, vdev_replacing_read);
 		else
 			vdev = vdev_create(guid, vdev_disk_read);
 
 		vdev->v_id = id;
 		vdev->v_top = pvdev != NULL ? pvdev : vdev;
 		if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
 			DATA_TYPE_UINT64, 0, &ashift) == 0)
 			vdev->v_ashift = ashift;
 		else
 			vdev->v_ashift = 0;
 		if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
 			DATA_TYPE_UINT64, 0, &nparity) == 0)
 			vdev->v_nparity = nparity;
 		else
 			vdev->v_nparity = 0;
 		if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
 				DATA_TYPE_STRING, 0, &path) == 0) {
 			if (strncmp(path, "/dev/", 5) == 0)
 				path += 5;
 			vdev->v_name = strdup(path);
 		} else {
 			if (!strcmp(type, "raidz")) {
 				if (vdev->v_nparity == 1)
 					vdev->v_name = "raidz1";
 				else if (vdev->v_nparity == 2)
 					vdev->v_name = "raidz2";
 				else if (vdev->v_nparity == 3)
 					vdev->v_name = "raidz3";
 				else {
 					printf("ZFS: can only boot from disk, mirror, raidz1, raidz2 and raidz3 vdevs\n");
 					return (EIO);
 				}
 			} else {
 				vdev->v_name = strdup(type);
 			}
 		}
 	} else {
 		is_new = 0;
 	}
 
 	if (is_new || is_newer) {
 		/*
 		 * This is either new vdev or we've already seen this vdev,
 		 * but from an older vdev label, so let's refresh its state
 		 * from the newer label.
 		 */
 		if (is_offline)
 			vdev->v_state = VDEV_STATE_OFFLINE;
 		else if (is_removed)
 			vdev->v_state = VDEV_STATE_REMOVED;
 		else if (is_faulted)
 			vdev->v_state = VDEV_STATE_FAULTED;
 		else if (is_degraded)
 			vdev->v_state = VDEV_STATE_DEGRADED;
 		else if (isnt_present)
 			vdev->v_state = VDEV_STATE_CANT_OPEN;
 	}
 
 	rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN,
 			 DATA_TYPE_NVLIST_ARRAY, &nkids, &kids);
 	/*
 	 * Its ok if we don't have any kids.
 	 */
 	if (rc == 0) {
 		vdev->v_nchildren = nkids;
 		for (i = 0; i < nkids; i++) {
 			rc = vdev_init_from_nvlist(kids, vdev, &kid, is_newer);
 			if (rc)
 				return (rc);
 			if (is_new)
 				STAILQ_INSERT_TAIL(&vdev->v_children, kid,
 						   v_childlink);
 			kids = nvlist_next(kids);
 		}
 	} else {
 		vdev->v_nchildren = 0;
 	}
 
 	if (vdevp)
 		*vdevp = vdev;
 	return (0);
 }
 
 static void
 vdev_set_state(vdev_t *vdev)
 {
 	vdev_t *kid;
 	int good_kids;
 	int bad_kids;
 
 	/*
 	 * A mirror or raidz is healthy if all its kids are healthy. A
 	 * mirror is degraded if any of its kids is healthy; a raidz
 	 * is degraded if at most nparity kids are offline.
 	 */
 	if (STAILQ_FIRST(&vdev->v_children)) {
 		good_kids = 0;
 		bad_kids = 0;
 		STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
 			if (kid->v_state == VDEV_STATE_HEALTHY)
 				good_kids++;
 			else
 				bad_kids++;
 		}
 		if (bad_kids == 0) {
 			vdev->v_state = VDEV_STATE_HEALTHY;
 		} else {
 			if (vdev->v_read == vdev_mirror_read) {
 				if (good_kids) {
 					vdev->v_state = VDEV_STATE_DEGRADED;
 				} else {
 					vdev->v_state = VDEV_STATE_OFFLINE;
 				}
 			} else if (vdev->v_read == vdev_raidz_read) {
 				if (bad_kids > vdev->v_nparity) {
 					vdev->v_state = VDEV_STATE_OFFLINE;
 				} else {
 					vdev->v_state = VDEV_STATE_DEGRADED;
 				}
 			}
 		}
 	}
 }
 
 static spa_t *
 spa_find_by_guid(uint64_t guid)
 {
 	spa_t *spa;
 
 	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
 		if (spa->spa_guid == guid)
 			return (spa);
 
 	return (0);
 }
 
 static spa_t *
 spa_find_by_name(const char *name)
 {
 	spa_t *spa;
 
 	STAILQ_FOREACH(spa, &zfs_pools, spa_link)
 		if (!strcmp(spa->spa_name, name))
 			return (spa);
 
 	return (0);
 }
 
 #ifdef BOOT2
 static spa_t *
 spa_get_primary(void)
 {
 
 	return (STAILQ_FIRST(&zfs_pools));
 }
 
 static vdev_t *
 spa_get_primary_vdev(const spa_t *spa)
 {
 	vdev_t *vdev;
 	vdev_t *kid;
 
 	if (spa == NULL)
 		spa = spa_get_primary();
 	if (spa == NULL)
 		return (NULL);
 	vdev = STAILQ_FIRST(&spa->spa_vdevs);
 	if (vdev == NULL)
 		return (NULL);
 	for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL;
 	     kid = STAILQ_FIRST(&vdev->v_children))
 		vdev = kid;
 	return (vdev);
 }
 #endif
 
 static spa_t *
 spa_create(uint64_t guid)
 {
 	spa_t *spa;
 
 	spa = malloc(sizeof(spa_t));
 	memset(spa, 0, sizeof(spa_t));
 	STAILQ_INIT(&spa->spa_vdevs);
 	spa->spa_guid = guid;
 	STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);
 
 	return (spa);
 }
 
 static const char *
 state_name(vdev_state_t state)
 {
 	static const char* names[] = {
 		"UNKNOWN",
 		"CLOSED",
 		"OFFLINE",
 		"REMOVED",
 		"CANT_OPEN",
 		"FAULTED",
 		"DEGRADED",
 		"ONLINE"
 	};
 	return names[state];
 }
 
 #ifdef BOOT2
 
 #define pager_printf printf
 
 #else
 
 static void
 pager_printf(const char *fmt, ...)
 {
 	char line[80];
 	va_list args;
 
 	va_start(args, fmt);
 	vsprintf(line, fmt, args);
 	va_end(args);
 	pager_output(line);
 }
 
 #endif
 
 #define STATUS_FORMAT	"        %s %s\n"
 
 static void
 print_state(int indent, const char *name, vdev_state_t state)
 {
 	int i;
 	char buf[512];
 
 	buf[0] = 0;
 	for (i = 0; i < indent; i++)
 		strcat(buf, "  ");
 	strcat(buf, name);
 	pager_printf(STATUS_FORMAT, buf, state_name(state));
 	
 }
 
 static void
 vdev_status(vdev_t *vdev, int indent)
 {
 	vdev_t *kid;
 	print_state(indent, vdev->v_name, vdev->v_state);
 
 	STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
 		vdev_status(kid, indent + 1);
 	}
 }
 
 static void
 spa_status(spa_t *spa)
 {
 	static char bootfs[ZFS_MAXNAMELEN];
 	uint64_t rootid;
 	vdev_t *vdev;
 	int good_kids, bad_kids, degraded_kids;
 	vdev_state_t state;
 
 	pager_printf("  pool: %s\n", spa->spa_name);
 	if (zfs_get_root(spa, &rootid) == 0 &&
 	    zfs_rlookup(spa, rootid, bootfs) == 0) {
 		if (bootfs[0] == '\0')
 			pager_printf("bootfs: %s\n", spa->spa_name);
 		else
 			pager_printf("bootfs: %s/%s\n", spa->spa_name, bootfs);
 	}
 	pager_printf("config:\n\n");
 	pager_printf(STATUS_FORMAT, "NAME", "STATE");
 
 	good_kids = 0;
 	degraded_kids = 0;
 	bad_kids = 0;
 	STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink) {
 		if (vdev->v_state == VDEV_STATE_HEALTHY)
 			good_kids++;
 		else if (vdev->v_state == VDEV_STATE_DEGRADED)
 			degraded_kids++;
 		else
 			bad_kids++;
 	}
 
 	state = VDEV_STATE_CLOSED;
 	if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
 		state = VDEV_STATE_HEALTHY;
 	else if ((good_kids + degraded_kids) > 0)
 		state = VDEV_STATE_DEGRADED;
 
 	print_state(0, spa->spa_name, state);
 	STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink) {
 		vdev_status(vdev, 1);
 	}
 }
 
 static void
 spa_all_status(void)
 {
 	spa_t *spa;
 	int first = 1;
 
 	STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
 		if (!first)
 			pager_printf("\n");
 		first = 0;
 		spa_status(spa);
 	}
 }
 
 static int
 vdev_probe(vdev_phys_read_t *read, void *read_priv, spa_t **spap)
 {
 	vdev_t vtmp;
 	vdev_phys_t *vdev_label = (vdev_phys_t *) zap_scratch;
 	spa_t *spa;
 	vdev_t *vdev, *top_vdev, *pool_vdev;
 	off_t off;
 	blkptr_t bp;
 	const unsigned char *nvlist;
 	uint64_t val;
 	uint64_t guid;
 	uint64_t pool_txg, pool_guid;
 	uint64_t is_log;
 	const char *pool_name;
 	const unsigned char *vdevs;
 	const unsigned char *features;
 	int i, rc, is_newer;
 	char *upbuf;
 	const struct uberblock *up;
 
 	/*
 	 * Load the vdev label and figure out which
 	 * uberblock is most current.
 	 */
 	memset(&vtmp, 0, sizeof(vtmp));
 	vtmp.v_phys_read = read;
 	vtmp.v_read_priv = read_priv;
 	off = offsetof(vdev_label_t, vl_vdev_phys);
 	BP_ZERO(&bp);
 	BP_SET_LSIZE(&bp, sizeof(vdev_phys_t));
 	BP_SET_PSIZE(&bp, sizeof(vdev_phys_t));
 	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
 	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
 	DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
 	ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
 	if (vdev_read_phys(&vtmp, &bp, vdev_label, off, 0))
 		return (EIO);
 
 	if (vdev_label->vp_nvlist[0] != NV_ENCODE_XDR) {
 		return (EIO);
 	}
 
 	nvlist = (const unsigned char *) vdev_label->vp_nvlist + 4;
 
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_VERSION,
 			DATA_TYPE_UINT64, 0, &val)) {
 		return (EIO);
 	}
 
 	if (!SPA_VERSION_IS_SUPPORTED(val)) {
 		printf("ZFS: unsupported ZFS version %u (should be %u)\n",
 		    (unsigned) val, (unsigned) SPA_VERSION);
 		return (EIO);
 	}
 
 	/* Check ZFS features for read */
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_FEATURES_FOR_READ,
 			DATA_TYPE_NVLIST, 0, &features) == 0
 	    && nvlist_check_features_for_read(features) != 0)
 		return (EIO);
 
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_POOL_STATE,
 			DATA_TYPE_UINT64, 0, &val)) {
 		return (EIO);
 	}
 
 	if (val == POOL_STATE_DESTROYED) {
 		/* We don't boot only from destroyed pools. */
 		return (EIO);
 	}
 
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_POOL_TXG,
 			DATA_TYPE_UINT64, 0, &pool_txg)
 	    || nvlist_find(nvlist,
 			   ZPOOL_CONFIG_POOL_GUID,
 			   DATA_TYPE_UINT64, 0, &pool_guid)
 	    || nvlist_find(nvlist,
 			   ZPOOL_CONFIG_POOL_NAME,
 			   DATA_TYPE_STRING, 0, &pool_name)) {
 		/*
 		 * Cache and spare devices end up here - just ignore
 		 * them.
 		 */
 		/*printf("ZFS: can't find pool details\n");*/
 		return (EIO);
 	}
 
 	is_log = 0;
 	(void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, 0,
 	    &is_log);
 	if (is_log)
 		return (EIO);
 
 	/*
 	 * Create the pool if this is the first time we've seen it.
 	 */
 	spa = spa_find_by_guid(pool_guid);
 	if (!spa) {
 		spa = spa_create(pool_guid);
 		spa->spa_name = strdup(pool_name);
 	}
 	if (pool_txg > spa->spa_txg) {
 		spa->spa_txg = pool_txg;
 		is_newer = 1;
 	} else
 		is_newer = 0;
 
 	/*
 	 * Get the vdev tree and create our in-core copy of it.
 	 * If we already have a vdev with this guid, this must
 	 * be some kind of alias (overlapping slices, dangerously dedicated
 	 * disks etc).
 	 */
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_GUID,
 			DATA_TYPE_UINT64, 0, &guid)) {
 		return (EIO);
 	}
 	vdev = vdev_find(guid);
 	if (vdev && vdev->v_phys_read)	/* Has this vdev already been inited? */
 		return (EIO);
 
 	if (nvlist_find(nvlist,
 			ZPOOL_CONFIG_VDEV_TREE,
 			DATA_TYPE_NVLIST, 0, &vdevs)) {
 		return (EIO);
 	}
 
 	rc = vdev_init_from_nvlist(vdevs, NULL, &top_vdev, is_newer);
 	if (rc)
 		return (rc);
 
 	/*
 	 * Add the toplevel vdev to the pool if its not already there.
 	 */
 	STAILQ_FOREACH(pool_vdev, &spa->spa_vdevs, v_childlink)
 		if (top_vdev == pool_vdev)
 			break;
 	if (!pool_vdev && top_vdev)
 		STAILQ_INSERT_TAIL(&spa->spa_vdevs, top_vdev, v_childlink);
 
 	/*
 	 * We should already have created an incomplete vdev for this
 	 * vdev. Find it and initialise it with our read proc.
 	 */
 	vdev = vdev_find(guid);
 	if (vdev) {
 		vdev->v_phys_read = read;
 		vdev->v_read_priv = read_priv;
 		vdev->v_state = VDEV_STATE_HEALTHY;
 	} else {
 		printf("ZFS: inconsistent nvlist contents\n");
 		return (EIO);
 	}
 
 	/*
 	 * Re-evaluate top-level vdev state.
 	 */
 	vdev_set_state(top_vdev);
 
 	/*
 	 * Ok, we are happy with the pool so far. Lets find
 	 * the best uberblock and then we can actually access
 	 * the contents of the pool.
 	 */
 	upbuf = zfs_alloc(VDEV_UBERBLOCK_SIZE(vdev));
 	up = (const struct uberblock *)upbuf;
 	for (i = 0;
 	     i < VDEV_UBERBLOCK_COUNT(vdev);
 	     i++) {
 		off = VDEV_UBERBLOCK_OFFSET(vdev, i);
 		BP_ZERO(&bp);
 		DVA_SET_OFFSET(&bp.blk_dva[0], off);
 		BP_SET_LSIZE(&bp, VDEV_UBERBLOCK_SIZE(vdev));
 		BP_SET_PSIZE(&bp, VDEV_UBERBLOCK_SIZE(vdev));
 		BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
 		BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
 		ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
 
 		if (vdev_read_phys(vdev, &bp, upbuf, off, 0))
 			continue;
 
 		if (up->ub_magic != UBERBLOCK_MAGIC)
 			continue;
 		if (up->ub_txg < spa->spa_txg)
 			continue;
 		if (up->ub_txg > spa->spa_uberblock.ub_txg) {
 			spa->spa_uberblock = *up;
 		} else if (up->ub_txg == spa->spa_uberblock.ub_txg) {
 			if (up->ub_timestamp > spa->spa_uberblock.ub_timestamp)
 				spa->spa_uberblock = *up;
 		}
 	}
 	zfs_free(upbuf, VDEV_UBERBLOCK_SIZE(vdev));
 
+	vdev->spa = spa;
 	if (spap)
 		*spap = spa;
 	return (0);
 }
 
 static int
 ilog2(int n)
 {
 	int v;
 
 	for (v = 0; v < 32; v++)
 		if (n == (1 << v))
 			return v;
 	return -1;
 }
 
 static int
 zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
 {
 	blkptr_t gbh_bp;
 	zio_gbh_phys_t zio_gb;
 	char *pbuf;
 	int i;
 
 	/* Artificial BP for gang block header. */
 	gbh_bp = *bp;
 	BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
 	BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
 	BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
 	BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
 	for (i = 0; i < SPA_DVAS_PER_BP; i++)
 		DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);
 
 	/* Read gang header block using the artificial BP. */
 	if (zio_read(spa, &gbh_bp, &zio_gb))
 		return (EIO);
 
 	pbuf = buf;
 	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
 		blkptr_t *gbp = &zio_gb.zg_blkptr[i];
 
 		if (BP_IS_HOLE(gbp))
 			continue;
 		if (zio_read(spa, gbp, pbuf))
 			return (EIO);
 		pbuf += BP_GET_PSIZE(gbp);
 	}
 
-	if (zio_checksum_verify(bp, buf))
+	if (zio_checksum_verify(spa, bp, buf))
 		return (EIO);
 	return (0);
 }
 
 static int
 zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
 {
 	int cpfunc = BP_GET_COMPRESS(bp);
 	uint64_t align, size;
 	void *pbuf;
 	int i, error;
 
 	/*
 	 * Process data embedded in block pointer
 	 */
 	if (BP_IS_EMBEDDED(bp)) {
 		ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
 
 		size = BPE_GET_PSIZE(bp);
 		ASSERT(size <= BPE_PAYLOAD_SIZE);
 
 		if (cpfunc != ZIO_COMPRESS_OFF)
 			pbuf = zfs_alloc(size);
 		else
 			pbuf = buf;
 
 		decode_embedded_bp_compressed(bp, pbuf);
 		error = 0;
 
 		if (cpfunc != ZIO_COMPRESS_OFF) {
 			error = zio_decompress_data(cpfunc, pbuf,
 			    size, buf, BP_GET_LSIZE(bp));
 			zfs_free(pbuf, size);
 		}
 		if (error != 0)
 			printf("ZFS: i/o error - unable to decompress block pointer data, error %d\n",
 			    error);
 		return (error);
 	}
 
 	error = EIO;
 
 	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
 		const dva_t *dva = &bp->blk_dva[i];
 		vdev_t *vdev;
 		int vdevid;
 		off_t offset;
 
 		if (!dva->dva_word[0] && !dva->dva_word[1])
 			continue;
 
 		vdevid = DVA_GET_VDEV(dva);
 		offset = DVA_GET_OFFSET(dva);
 		STAILQ_FOREACH(vdev, &spa->spa_vdevs, v_childlink) {
 			if (vdev->v_id == vdevid)
 				break;
 		}
 		if (!vdev || !vdev->v_read)
 			continue;
 
 		size = BP_GET_PSIZE(bp);
 		if (vdev->v_read == vdev_raidz_read) {
 			align = 1ULL << vdev->v_top->v_ashift;
 			if (P2PHASE(size, align) != 0)
 				size = P2ROUNDUP(size, align);
 		}
 		if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
 			pbuf = zfs_alloc(size);
 		else
 			pbuf = buf;
 
 		if (DVA_GET_GANG(dva))
 			error = zio_read_gang(spa, bp, pbuf);
 		else
 			error = vdev->v_read(vdev, bp, pbuf, offset, size);
 		if (error == 0) {
 			if (cpfunc != ZIO_COMPRESS_OFF)
 				error = zio_decompress_data(cpfunc, pbuf,
 				    BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
 			else if (size != BP_GET_PSIZE(bp))
 				bcopy(pbuf, buf, BP_GET_PSIZE(bp));
 		}
 		if (buf != pbuf)
 			zfs_free(pbuf, size);
 		if (error == 0)
 			break;
 	}
 	if (error != 0)
 		printf("ZFS: i/o error - all block copies unavailable\n");
 	return (error);
 }
 
 static int
 dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset, void *buf, size_t buflen)
 {
 	int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
 	int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
 	int nlevels = dnode->dn_nlevels;
 	int i, rc;
 
 	if (bsize > SPA_MAXBLOCKSIZE) {
-		printf("ZFS: I/O error - blocks larger than 128K are not supported\n");
+		printf("ZFS: I/O error - blocks larger than %llu are not "
+		    "supported\n", SPA_MAXBLOCKSIZE);
 		return (EIO);
 	}
 
 	/*
 	 * Note: bsize may not be a power of two here so we need to do an
 	 * actual divide rather than a bitshift.
 	 */
 	while (buflen > 0) {
 		uint64_t bn = offset / bsize;
 		int boff = offset % bsize;
 		int ibn;
 		const blkptr_t *indbp;
 		blkptr_t bp;
 
 		if (bn > dnode->dn_maxblkid)
 			return (EIO);
 
 		if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
 			goto cached;
 
 		indbp = dnode->dn_blkptr;
 		for (i = 0; i < nlevels; i++) {
 			/*
 			 * Copy the bp from the indirect array so that
 			 * we can re-use the scratch buffer for multi-level
 			 * objects.
 			 */
 			ibn = bn >> ((nlevels - i - 1) * ibshift);
 			ibn &= ((1 << ibshift) - 1);
 			bp = indbp[ibn];
 			if (BP_IS_HOLE(&bp)) {
 				memset(dnode_cache_buf, 0, bsize);
 				break;
 			}
 			rc = zio_read(spa, &bp, dnode_cache_buf);
 			if (rc)
 				return (rc);
 			indbp = (const blkptr_t *) dnode_cache_buf;
 		}
 		dnode_cache_obj = dnode;
 		dnode_cache_bn = bn;
 	cached:
 
 		/*
 		 * The buffer contains our data block. Copy what we
 		 * need from it and loop.
 		 */ 
 		i = bsize - boff;
 		if (i > buflen) i = buflen;
 		memcpy(buf, &dnode_cache_buf[boff], i);
 		buf = ((char*) buf) + i;
 		offset += i;
 		buflen -= i;
 	}
 
 	return (0);
 }
 
 /*
  * Lookup a value in a microzap directory. Assumes that the zap
  * scratch buffer contains the directory contents.
  */
 static int
 mzap_lookup(const dnode_phys_t *dnode, const char *name, uint64_t *value)
 {
 	const mzap_phys_t *mz;
 	const mzap_ent_phys_t *mze;
 	size_t size;
 	int chunks, i;
 
 	/*
 	 * Microzap objects use exactly one block. Read the whole
 	 * thing.
 	 */
 	size = dnode->dn_datablkszsec * 512;
 
 	mz = (const mzap_phys_t *) zap_scratch;
 	chunks = size / MZAP_ENT_LEN - 1;
 
 	for (i = 0; i < chunks; i++) {
 		mze = &mz->mz_chunk[i];
 		if (!strcmp(mze->mze_name, name)) {
 			*value = mze->mze_value;
 			return (0);
 		}
 	}
 
 	return (ENOENT);
 }
 
 /*
  * Compare a name with a zap leaf entry. Return non-zero if the name
  * matches.
  */
 static int
 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, const char *name)
 {
 	size_t namelen;
 	const zap_leaf_chunk_t *nc;
 	const char *p;
 
 	namelen = zc->l_entry.le_name_numints;
 			
 	nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
 	p = name;
 	while (namelen > 0) {
 		size_t len;
 		len = namelen;
 		if (len > ZAP_LEAF_ARRAY_BYTES)
 			len = ZAP_LEAF_ARRAY_BYTES;
 		if (memcmp(p, nc->l_array.la_array, len))
 			return (0);
 		p += len;
 		namelen -= len;
 		nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
 	}
 
 	return 1;
 }
 
 /*
  * Extract a uint64_t value from a zap leaf entry.
  */
 static uint64_t
 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
 {
 	const zap_leaf_chunk_t *vc;
 	int i;
 	uint64_t value;
 	const uint8_t *p;
 
 	vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
 	for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
 		value = (value << 8) | p[i];
 	}
 
 	return value;
 }
 
+static void
+stv(int len, void *addr, uint64_t value)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *)addr = value;
+		return;
+	case 2:
+		*(uint16_t *)addr = value;
+		return;
+	case 4:
+		*(uint32_t *)addr = value;
+		return;
+	case 8:
+		*(uint64_t *)addr = value;
+		return;
+	}
+}
+
 /*
+ * Extract an array from a zap leaf entry.
+ */
+static void
+fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
+    uint64_t integer_size, uint64_t num_integers, void *buf)
+{
+	uint64_t array_int_len = zc->l_entry.le_value_intlen;
+	uint64_t value = 0;
+	uint64_t *u64 = buf;
+	char *p = buf;
+	int len = MIN(zc->l_entry.le_value_numints, num_integers);
+	int chunk = zc->l_entry.le_value_chunk;
+	int byten = 0;
+
+	if (integer_size == 8 && len == 1) {
+		*u64 = fzap_leaf_value(zl, zc);
+		return;
+	}
+
+	while (len > 0) {
+		struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array;
+		int i;
+
+		ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl));
+		for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
+			value = (value << 8) | la->la_array[i];
+			byten++;
+			if (byten == array_int_len) {
+				stv(integer_size, p, value);
+				byten = 0;
+				len--;
+				if (len == 0)
+					return;
+				p += integer_size;
+			}
+		}
+		chunk = la->la_next;
+	}
+}
+
+/*
  * Lookup a value in a fatzap directory. Assumes that the zap scratch
  * buffer contains the directory header.
  */
 static int
-fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t *value)
+fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name,
+    uint64_t integer_size, uint64_t num_integers, void *value)
 {
 	int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
 	zap_phys_t zh = *(zap_phys_t *) zap_scratch;
 	fat_zap_t z;
 	uint64_t *ptrtbl;
 	uint64_t hash;
 	int rc;
 
 	if (zh.zap_magic != ZAP_MAGIC)
 		return (EIO);
 
 	z.zap_block_shift = ilog2(bsize);
 	z.zap_phys = (zap_phys_t *) zap_scratch;
 
 	/*
 	 * Figure out where the pointer table is and read it in if necessary.
 	 */
 	if (zh.zap_ptrtbl.zt_blk) {
 		rc = dnode_read(spa, dnode, zh.zap_ptrtbl.zt_blk * bsize,
 			       zap_scratch, bsize);
 		if (rc)
 			return (rc);
 		ptrtbl = (uint64_t *) zap_scratch;
 	} else {
 		ptrtbl = &ZAP_EMBEDDED_PTRTBL_ENT(&z, 0);
 	}
 
 	hash = zap_hash(zh.zap_salt, name);
 
 	zap_leaf_t zl;
 	zl.l_bs = z.zap_block_shift;
 
 	off_t off = ptrtbl[hash >> (64 - zh.zap_ptrtbl.zt_shift)] << zl.l_bs;
 	zap_leaf_chunk_t *zc;
 
 	rc = dnode_read(spa, dnode, off, zap_scratch, bsize);
 	if (rc)
 		return (rc);
 
 	zl.l_phys = (zap_leaf_phys_t *) zap_scratch;
 
 	/*
 	 * Make sure this chunk matches our hash.
 	 */
 	if (zl.l_phys->l_hdr.lh_prefix_len > 0
 	    && zl.l_phys->l_hdr.lh_prefix
 	    != hash >> (64 - zl.l_phys->l_hdr.lh_prefix_len))
 		return (ENOENT);
 
 	/*
 	 * Hash within the chunk to find our entry.
 	 */
 	int shift = (64 - ZAP_LEAF_HASH_SHIFT(&zl) - zl.l_phys->l_hdr.lh_prefix_len);
 	int h = (hash >> shift) & ((1 << ZAP_LEAF_HASH_SHIFT(&zl)) - 1);
 	h = zl.l_phys->l_hash[h];
 	if (h == 0xffff)
 		return (ENOENT);
 	zc = &ZAP_LEAF_CHUNK(&zl, h);
 	while (zc->l_entry.le_hash != hash) {
 		if (zc->l_entry.le_next == 0xffff) {
 			zc = 0;
 			break;
 		}
 		zc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_next);
 	}
 	if (fzap_name_equal(&zl, zc, name)) {
-		if (zc->l_entry.le_value_intlen * zc->l_entry.le_value_numints > 8)
+		if (zc->l_entry.le_value_intlen * zc->l_entry.le_value_numints >
+		    integer_size * num_integers)
 			return (E2BIG);
-		*value = fzap_leaf_value(&zl, zc);
+		fzap_leaf_array(&zl, zc, integer_size, num_integers, value);
 		return (0);
 	}
 
 	return (ENOENT);
 }
 
 /*
  * Lookup a name in a zap object and return its value as a uint64_t.
  */
 static int
-zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t *value)
+zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name,
+    uint64_t integer_size, uint64_t num_integers, void *value)
 {
 	int rc;
 	uint64_t zap_type;
 	size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
 
 	rc = dnode_read(spa, dnode, 0, zap_scratch, size);
 	if (rc)
 		return (rc);
 
 	zap_type = *(uint64_t *) zap_scratch;
 	if (zap_type == ZBT_MICRO)
 		return mzap_lookup(dnode, name, value);
-	else if (zap_type == ZBT_HEADER)
-		return fzap_lookup(spa, dnode, name, value);
+	else if (zap_type == ZBT_HEADER) {
+		return fzap_lookup(spa, dnode, name, integer_size,
+		    num_integers, value);
+	}
 	printf("ZFS: invalid zap_type=%d\n", (int)zap_type);
 	return (EIO);
 }
 
 /*
  * List a microzap directory. Assumes that the zap scratch buffer contains
  * the directory contents.
  */
 static int
 mzap_list(const dnode_phys_t *dnode, int (*callback)(const char *, uint64_t))
 {
 	const mzap_phys_t *mz;
 	const mzap_ent_phys_t *mze;
 	size_t size;
 	int chunks, i, rc;
 
 	/*
 	 * Microzap objects use exactly one block. Read the whole
 	 * thing.
 	 */
 	size = dnode->dn_datablkszsec * 512;
 	mz = (const mzap_phys_t *) zap_scratch;
 	chunks = size / MZAP_ENT_LEN - 1;
 
 	for (i = 0; i < chunks; i++) {
 		mze = &mz->mz_chunk[i];
 		if (mze->mze_name[0]) {
 			rc = callback(mze->mze_name, mze->mze_value);
 			if (rc != 0)
 				return (rc);
 		}
 	}
 
 	return (0);
 }
 
 /*
  * List a fatzap directory. Assumes that the zap scratch buffer contains
  * the directory header.
  */
 static int
 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, int (*callback)(const char *, uint64_t))
 {
 	int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
 	zap_phys_t zh = *(zap_phys_t *) zap_scratch;
 	fat_zap_t z;
 	int i, j, rc;
 
 	if (zh.zap_magic != ZAP_MAGIC)
 		return (EIO);
 
 	z.zap_block_shift = ilog2(bsize);
 	z.zap_phys = (zap_phys_t *) zap_scratch;
 
 	/*
 	 * This assumes that the leaf blocks start at block 1. The
 	 * documentation isn't exactly clear on this.
 	 */
 	zap_leaf_t zl;
 	zl.l_bs = z.zap_block_shift;
 	for (i = 0; i < zh.zap_num_leafs; i++) {
 		off_t off = (i + 1) << zl.l_bs;
 		char name[256], *p;
 		uint64_t value;
 
 		if (dnode_read(spa, dnode, off, zap_scratch, bsize))
 			return (EIO);
 
 		zl.l_phys = (zap_leaf_phys_t *) zap_scratch;
 
 		for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
 			zap_leaf_chunk_t *zc, *nc;
 			int namelen;
 
 			zc = &ZAP_LEAF_CHUNK(&zl, j);
 			if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
 				continue;
 			namelen = zc->l_entry.le_name_numints;
 			if (namelen > sizeof(name))
 				namelen = sizeof(name);
 
 			/*
 			 * Paste the name back together.
 			 */
 			nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
 			p = name;
 			while (namelen > 0) {
 				int len;
 				len = namelen;
 				if (len > ZAP_LEAF_ARRAY_BYTES)
 					len = ZAP_LEAF_ARRAY_BYTES;
 				memcpy(p, nc->l_array.la_array, len);
 				p += len;
 				namelen -= len;
 				nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
 			}
 
 			/*
 			 * Assume the first eight bytes of the value are
 			 * a uint64_t.
 			 */
 			value = fzap_leaf_value(&zl, zc);
 
 			//printf("%s 0x%jx\n", name, (uintmax_t)value);
 			rc = callback((const char *)name, value);
 			if (rc != 0)
 				return (rc);
 		}
 	}
 
 	return (0);
 }
 
 static int zfs_printf(const char *name, uint64_t value __unused)
 {
 
 	printf("%s\n", name);
 
 	return (0);
 }
 
 /*
  * List a zap directory.
  */
 static int
 zap_list(const spa_t *spa, const dnode_phys_t *dnode)
 {
 	uint64_t zap_type;
 	size_t size = dnode->dn_datablkszsec * 512;
 
 	if (dnode_read(spa, dnode, 0, zap_scratch, size))
 		return (EIO);
 
 	zap_type = *(uint64_t *) zap_scratch;
 	if (zap_type == ZBT_MICRO)
 		return mzap_list(dnode, zfs_printf);
 	else
 		return fzap_list(spa, dnode, zfs_printf);
 }
 
 static int
 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum, dnode_phys_t *dnode)
 {
 	off_t offset;
 
 	offset = objnum * sizeof(dnode_phys_t);
 	return dnode_read(spa, &os->os_meta_dnode, offset,
 		dnode, sizeof(dnode_phys_t));
 }
 
 static int
 mzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, uint64_t value)
 {
 	const mzap_phys_t *mz;
 	const mzap_ent_phys_t *mze;
 	size_t size;
 	int chunks, i;
 
 	/*
 	 * Microzap objects use exactly one block. Read the whole
 	 * thing.
 	 */
 	size = dnode->dn_datablkszsec * 512;
 
 	mz = (const mzap_phys_t *) zap_scratch;
 	chunks = size / MZAP_ENT_LEN - 1;
 
 	for (i = 0; i < chunks; i++) {
 		mze = &mz->mz_chunk[i];
 		if (value == mze->mze_value) {
 			strcpy(name, mze->mze_name);
 			return (0);
 		}
 	}
 
 	return (ENOENT);
 }
 
 static void
 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name)
 {
 	size_t namelen;
 	const zap_leaf_chunk_t *nc;
 	char *p;
 
 	namelen = zc->l_entry.le_name_numints;
 
 	nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
 	p = name;
 	while (namelen > 0) {
 		size_t len;
 		len = namelen;
 		if (len > ZAP_LEAF_ARRAY_BYTES)
 			len = ZAP_LEAF_ARRAY_BYTES;
 		memcpy(p, nc->l_array.la_array, len);
 		p += len;
 		namelen -= len;
 		nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
 	}
 
 	*p = '\0';
 }
 
 static int
 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, uint64_t value)
 {
 	int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
 	zap_phys_t zh = *(zap_phys_t *) zap_scratch;
 	fat_zap_t z;
 	int i, j;
 
 	if (zh.zap_magic != ZAP_MAGIC)
 		return (EIO);
 
 	z.zap_block_shift = ilog2(bsize);
 	z.zap_phys = (zap_phys_t *) zap_scratch;
 
 	/*
 	 * This assumes that the leaf blocks start at block 1. The
 	 * documentation isn't exactly clear on this.
 	 */
 	zap_leaf_t zl;
 	zl.l_bs = z.zap_block_shift;
 	for (i = 0; i < zh.zap_num_leafs; i++) {
 		off_t off = (i + 1) << zl.l_bs;
 
 		if (dnode_read(spa, dnode, off, zap_scratch, bsize))
 			return (EIO);
 
 		zl.l_phys = (zap_leaf_phys_t *) zap_scratch;
 
 		for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
 			zap_leaf_chunk_t *zc;
 
 			zc = &ZAP_LEAF_CHUNK(&zl, j);
 			if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
 				continue;
 			if (zc->l_entry.le_value_intlen != 8 ||
 			    zc->l_entry.le_value_numints != 1)
 				continue;
 
 			if (fzap_leaf_value(&zl, zc) == value) {
 				fzap_name_copy(&zl, zc, name);
 				return (0);
 			}
 		}
 	}
 
 	return (ENOENT);
 }
 
 static int
 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, uint64_t value)
 {
 	int rc;
 	uint64_t zap_type;
 	size_t size = dnode->dn_datablkszsec * 512;
 
 	rc = dnode_read(spa, dnode, 0, zap_scratch, size);
 	if (rc)
 		return (rc);
 
 	zap_type = *(uint64_t *) zap_scratch;
 	if (zap_type == ZBT_MICRO)
 		return mzap_rlookup(spa, dnode, name, value);
 	else
 		return fzap_rlookup(spa, dnode, name, value);
 }
 
 static int
 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result)
 {
 	char name[256];
 	char component[256];
 	uint64_t dir_obj, parent_obj, child_dir_zapobj;
 	dnode_phys_t child_dir_zap, dataset, dir, parent;
 	dsl_dir_phys_t *dd;
 	dsl_dataset_phys_t *ds;
 	char *p;
 	int len;
 
 	p = &name[sizeof(name) - 1];
 	*p = '\0';
 
 	if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
 		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
 		return (EIO);
 	}
 	ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
 	dir_obj = ds->ds_dir_obj;
 
 	for (;;) {
 		if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0)
 			return (EIO);
 		dd = (dsl_dir_phys_t *)&dir.dn_bonus;
 
 		/* Actual loop condition. */
 		parent_obj  = dd->dd_parent_obj;
 		if (parent_obj == 0)
 			break;
 
 		if (objset_get_dnode(spa, &spa->spa_mos, parent_obj, &parent) != 0)
 			return (EIO);
 		dd = (dsl_dir_phys_t *)&parent.dn_bonus;
 		child_dir_zapobj = dd->dd_child_dir_zapobj;
 		if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0)
 			return (EIO);
 		if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0)
 			return (EIO);
 
 		len = strlen(component);
 		p -= len;
 		memcpy(p, component, len);
 		--p;
 		*p = '/';
 
 		/* Actual loop iteration. */
 		dir_obj = parent_obj;
 	}
 
 	if (*p != '\0')
 		++p;
 	strcpy(result, p);
 
 	return (0);
 }
 
 static int
 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum)
 {
 	char element[256];
 	uint64_t dir_obj, child_dir_zapobj;
 	dnode_phys_t child_dir_zap, dir;
 	dsl_dir_phys_t *dd;
 	const char *p, *q;
 
 	if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir))
 		return (EIO);
-	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, &dir_obj))
+	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
+	    1, &dir_obj))
 		return (EIO);
 
 	p = name;
 	for (;;) {
 		if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir))
 			return (EIO);
 		dd = (dsl_dir_phys_t *)&dir.dn_bonus;
 
 		while (*p == '/')
 			p++;
 		/* Actual loop condition #1. */
 		if (*p == '\0')
 			break;
 
 		q = strchr(p, '/');
 		if (q) {
 			memcpy(element, p, q - p);
 			element[q - p] = '\0';
 			p = q + 1;
 		} else {
 			strcpy(element, p);
 			p += strlen(p);
 		}
 
 		child_dir_zapobj = dd->dd_child_dir_zapobj;
 		if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0)
 			return (EIO);
 
 		/* Actual loop condition #2. */
-		if (zap_lookup(spa, &child_dir_zap, element, &dir_obj) != 0)
+		if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj),
+		    1, &dir_obj) != 0)
 			return (ENOENT);
 	}
 
 	*objnum = dd->dd_head_dataset_obj;
 	return (0);
 }
 
 #ifndef BOOT2
 static int
 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/)
 {
 	uint64_t dir_obj, child_dir_zapobj;
 	dnode_phys_t child_dir_zap, dir, dataset;
 	dsl_dataset_phys_t *ds;
 	dsl_dir_phys_t *dd;
 
 	if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
 		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
 		return (EIO);
 	}
 	ds = (dsl_dataset_phys_t *) &dataset.dn_bonus;
 	dir_obj = ds->ds_dir_obj;
 
 	if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) {
 		printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
 		return (EIO);
 	}
 	dd = (dsl_dir_phys_t *)&dir.dn_bonus;
 
 	child_dir_zapobj = dd->dd_child_dir_zapobj;
 	if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0) {
 		printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
 		return (EIO);
 	}
 
 	return (zap_list(spa, &child_dir_zap) != 0);
 }
 
 int
 zfs_callback_dataset(const spa_t *spa, uint64_t objnum, int (*callback)(const char *, uint64_t))
 {
 	uint64_t dir_obj, child_dir_zapobj, zap_type;
 	dnode_phys_t child_dir_zap, dir, dataset;
 	dsl_dataset_phys_t *ds;
 	dsl_dir_phys_t *dd;
 	int err;
 
 	err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset);
 	if (err != 0) {
 		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
 		return (err);
 	}
 	ds = (dsl_dataset_phys_t *) &dataset.dn_bonus;
 	dir_obj = ds->ds_dir_obj;
 
 	err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir);
 	if (err != 0) {
 		printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
 		return (err);
 	}
 	dd = (dsl_dir_phys_t *)&dir.dn_bonus;
 
 	child_dir_zapobj = dd->dd_child_dir_zapobj;
 	err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap);
 	if (err != 0) {
 		printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
 		return (err);
 	}
 
 	err = dnode_read(spa, &child_dir_zap, 0, zap_scratch, child_dir_zap.dn_datablkszsec * 512);
 	if (err != 0)
 		return (err);
 
 	zap_type = *(uint64_t *) zap_scratch;
 	if (zap_type == ZBT_MICRO)
 		return mzap_list(&child_dir_zap, callback);
 	else
 		return fzap_list(spa, &child_dir_zap, callback);
 }
 #endif
 
 /*
  * Find the object set given the object number of its dataset object
  * and return its details in *objset
  */
 static int
 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset)
 {
 	dnode_phys_t dataset;
 	dsl_dataset_phys_t *ds;
 
 	if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
 		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
 		return (EIO);
 	}
 
 	ds = (dsl_dataset_phys_t *) &dataset.dn_bonus;
 	if (zio_read(spa, &ds->ds_bp, objset)) {
 		printf("ZFS: can't read object set for dataset %ju\n",
 		    (uintmax_t)objnum);
 		return (EIO);
 	}
 
 	return (0);
 }
 
 /*
  * Find the object set pointed to by the BOOTFS property or the root
  * dataset if there is none and return its details in *objset
  */
 static int
 zfs_get_root(const spa_t *spa, uint64_t *objid)
 {
 	dnode_phys_t dir, propdir;
 	uint64_t props, bootfs, root;
 
 	*objid = 0;
 
 	/*
 	 * Start with the MOS directory object.
 	 */
 	if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir)) {
 		printf("ZFS: can't read MOS object directory\n");
 		return (EIO);
 	}
 
 	/*
 	 * Lookup the pool_props and see if we can find a bootfs.
 	 */
-	if (zap_lookup(spa, &dir, DMU_POOL_PROPS, &props) == 0
+	if (zap_lookup(spa, &dir, DMU_POOL_PROPS, sizeof (props), 1, &props) == 0
 	     && objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0
-	     && zap_lookup(spa, &propdir, "bootfs", &bootfs) == 0
+	     && zap_lookup(spa, &propdir, "bootfs", sizeof (bootfs), 1, &bootfs) == 0
 	     && bootfs != 0)
 	{
 		*objid = bootfs;
 		return (0);
 	}
 	/*
 	 * Lookup the root dataset directory
 	 */
-	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, &root)
+	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (root), 1, &root)
 	    || objset_get_dnode(spa, &spa->spa_mos, root, &dir)) {
 		printf("ZFS: can't find root dsl_dir\n");
 		return (EIO);
 	}
 
 	/*
 	 * Use the information from the dataset directory's bonus buffer
 	 * to find the dataset object and from that the object set itself.
 	 */
 	dsl_dir_phys_t *dd = (dsl_dir_phys_t *) &dir.dn_bonus;
 	*objid = dd->dd_head_dataset_obj;
 	return (0);
 }
 
 static int
 zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount)
 {
 
 	mount->spa = spa;
 
 	/*
 	 * Find the root object set if not explicitly provided
 	 */
 	if (rootobj == 0 && zfs_get_root(spa, &rootobj)) {
 		printf("ZFS: can't find root filesystem\n");
 		return (EIO);
 	}
 
 	if (zfs_mount_dataset(spa, rootobj, &mount->objset)) {
 		printf("ZFS: can't open root filesystem\n");
 		return (EIO);
 	}
 
 	mount->rootobj = rootobj;
 
 	return (0);
 }
 
 /*
  * callback function for feature name checks.
  */
 static int
 check_feature(const char *name, uint64_t value)
 {
 	int i;
 
 	if (value == 0)
 		return (0);
 	if (name[0] == '\0')
 		return (0);
 
 	for (i = 0; features_for_read[i] != NULL; i++) {
 		if (strcmp(name, features_for_read[i]) == 0)
 			return (0);
 	}
 	printf("ZFS: unsupported feature: %s\n", name);
 	return (EIO);
 }
 
 /*
  * Checks whether the MOS features that are active are supported.
  */
 static int
 check_mos_features(const spa_t *spa)
 {
 	dnode_phys_t dir;
 	uint64_t objnum, zap_type;
 	size_t size;
 	int rc;
 
 	if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY,
 	    &dir)) != 0)
 		return (rc);
-	if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ, &objnum)) != 0)
+	if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ,
+	    sizeof (objnum), 1, &objnum)) != 0)
 		return (rc);
 
 	if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0)
 		return (rc);
 
 	if (dir.dn_type != DMU_OTN_ZAP_METADATA)
 		return (EIO);
 
 	size = dir.dn_datablkszsec * 512;
 	if (dnode_read(spa, &dir, 0, zap_scratch, size))
 		return (EIO);
 
 	zap_type = *(uint64_t *) zap_scratch;
 	if (zap_type == ZBT_MICRO)
 		rc = mzap_list(&dir, check_feature);
 	else
 		rc = fzap_list(spa, &dir, check_feature);
 
 	return (rc);
 }
 
 static int
 zfs_spa_init(spa_t *spa)
 {
+	dnode_phys_t dir;
 	int rc;
 
 	if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) {
 		printf("ZFS: can't read MOS of pool %s\n", spa->spa_name);
 		return (EIO);
 	}
 	if (spa->spa_mos.os_type != DMU_OST_META) {
 		printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name);
 		return (EIO);
 	}
 
+	if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT,
+	    &dir)) {
+		printf("ZFS: failed to read pool %s directory object\n",
+		    spa->spa_name);
+		return (EIO);
+	}
+	/* this is allowed to fail, older pools do not have salt */
+	rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1,
+	    sizeof (spa->spa_cksum_salt.zcs_bytes),
+	    spa->spa_cksum_salt.zcs_bytes);
+
 	rc = check_mos_features(spa);
 	if (rc != 0) {
 		printf("ZFS: pool %s is not supported\n", spa->spa_name);
 	}
 
 	return (rc);
 }
 
 static int
 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb)
 {
 
 	if (dn->dn_bonustype != DMU_OT_SA) {
 		znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus;
 
 		sb->st_mode = zp->zp_mode;
 		sb->st_uid = zp->zp_uid;
 		sb->st_gid = zp->zp_gid;
 		sb->st_size = zp->zp_size;
 	} else {
 		sa_hdr_phys_t *sahdrp;
 		int hdrsize;
 		size_t size = 0;
 		void *buf = NULL;
 
 		if (dn->dn_bonuslen != 0)
 			sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
 		else {
 			if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) {
 				blkptr_t *bp = &dn->dn_spill;
 				int error;
 
 				size = BP_GET_LSIZE(bp);
 				buf = zfs_alloc(size);
 				error = zio_read(spa, bp, buf);
 				if (error != 0) {
 					zfs_free(buf, size);
 					return (error);
 				}
 				sahdrp = buf;
 			} else {
 				return (EIO);
 			}
 		}
 		hdrsize = SA_HDR_SIZE(sahdrp);
 		sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize +
 		    SA_MODE_OFFSET);
 		sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize +
 		    SA_UID_OFFSET);
 		sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize +
 		    SA_GID_OFFSET);
 		sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize +
 		    SA_SIZE_OFFSET);
 		if (buf != NULL)
 			zfs_free(buf, size);
 	}
 
 	return (0);
 }
 
 /*
  * Lookup a file and return its dnode.
  */
 static int
 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode)
 {
 	int rc;
 	uint64_t objnum, rootnum, parentnum;
 	const spa_t *spa;
 	dnode_phys_t dn;
 	const char *p, *q;
 	char element[256];
 	char path[1024];
 	int symlinks_followed = 0;
 	struct stat sb;
 
 	spa = mount->spa;
 	if (mount->objset.os_type != DMU_OST_ZFS) {
 		printf("ZFS: unexpected object set type %ju\n",
 		    (uintmax_t)mount->objset.os_type);
 		return (EIO);
 	}
 
 	/*
 	 * Get the root directory dnode.
 	 */
 	rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn);
 	if (rc)
 		return (rc);
 
-	rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, &rootnum);
+	rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof (rootnum), 1, &rootnum);
 	if (rc)
 		return (rc);
 
 	rc = objset_get_dnode(spa, &mount->objset, rootnum, &dn);
 	if (rc)
 		return (rc);
 
 	objnum = rootnum;
 	p = upath;
 	while (p && *p) {
 		while (*p == '/')
 			p++;
 		if (!*p)
 			break;
 		q = strchr(p, '/');
 		if (q) {
 			memcpy(element, p, q - p);
 			element[q - p] = 0;
 			p = q;
 		} else {
 			strcpy(element, p);
 			p = 0;
 		}
 
 		rc = zfs_dnode_stat(spa, &dn, &sb);
 		if (rc)
 			return (rc);
 		if (!S_ISDIR(sb.st_mode))
 			return (ENOTDIR);
 
 		parentnum = objnum;
-		rc = zap_lookup(spa, &dn, element, &objnum);
+		rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum);
 		if (rc)
 			return (rc);
 		objnum = ZFS_DIRENT_OBJ(objnum);
 
 		rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
 		if (rc)
 			return (rc);
 
 		/*
 		 * Check for symlink.
 		 */
 		rc = zfs_dnode_stat(spa, &dn, &sb);
 		if (rc)
 			return (rc);
 		if (S_ISLNK(sb.st_mode)) {
 			if (symlinks_followed > 10)
 				return (EMLINK);
 			symlinks_followed++;
 
 			/*
 			 * Read the link value and copy the tail of our
 			 * current path onto the end.
 			 */
 			if (p)
 				strcpy(&path[sb.st_size], p);
 			else
 				path[sb.st_size] = 0;
 			/*
 			 * Second test is purely to silence bogus compiler
 			 * warning about accessing past the end of dn_bonus.
 			 */
 			if (sb.st_size + sizeof(znode_phys_t) <=
 			    dn.dn_bonuslen && sizeof(znode_phys_t) <=
 			    sizeof(dn.dn_bonus)) {
 				memcpy(path, &dn.dn_bonus[sizeof(znode_phys_t)],
 					sb.st_size);
 			} else {
 				rc = dnode_read(spa, &dn, 0, path, sb.st_size);
 				if (rc)
 					return (rc);
 			}
 
 			/*
 			 * Restart with the new path, starting either at
 			 * the root or at the parent depending whether or
 			 * not the link is relative.
 			 */
 			p = path;
 			if (*p == '/')
 				objnum = rootnum;
 			else
 				objnum = parentnum;
 			objset_get_dnode(spa, &mount->objset, objnum, &dn);
 		}
 	}
 
 	*dnode = dn;
 	return (0);
 }
Index: head/sys/cddl/boot/zfs/fletcher.c
===================================================================
--- head/sys/cddl/boot/zfs/fletcher.c	(revision 304320)
+++ head/sys/cddl/boot/zfs/fletcher.c	(revision 304321)
@@ -1,94 +1,96 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-/*#pragma ident	"%Z%%M%	%I%	%E% SMI"*/
-
 static void
-fletcher_2_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
+fletcher_2_native(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
 {
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
 	uint64_t a0, b0, a1, b1;
 
 	for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) {
 		a0 += ip[0];
 		a1 += ip[1];
 		b0 += a0;
 		b1 += a1;
 	}
 
 	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
 }
 
 static void
-fletcher_2_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
+fletcher_2_byteswap(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
 {
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
 	uint64_t a0, b0, a1, b1;
 
 	for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) {
 		a0 += BSWAP_64(ip[0]);
 		a1 += BSWAP_64(ip[1]);
 		b0 += a0;
 		b1 += a1;
 	}
 
 	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
 }
 
 static void
-fletcher_4_native(const void *buf, uint64_t size, zio_cksum_t *zcp)
+fletcher_4_native(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
 {
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
 	uint64_t a, b, c, d;
 
 	for (a = b = c = d = 0; ip < ipend; ip++) {
 		a += ip[0];
 		b += a;
 		c += b;
 		d += c;
 	}
 
 	ZIO_SET_CHECKSUM(zcp, a, b, c, d);
 }
 
 static void
-fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
+fletcher_4_byteswap(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
 {
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
 	uint64_t a, b, c, d;
 
 	for (a = b = c = d = 0; ip < ipend; ip++) {
 		a += BSWAP_32(ip[0]);
 		b += a;
 		c += b;
 		d += c;
 	}
 
 	ZIO_SET_CHECKSUM(zcp, a, b, c, d);
 }
Index: head/sys/cddl/boot/zfs/sha256.c
===================================================================
--- head/sys/cddl/boot/zfs/sha256.c	(revision 304320)
+++ head/sys/cddl/boot/zfs/sha256.c	(revision 304321)
@@ -1,127 +1,325 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License, Version 1.0 only
  * (the "License").  You may not use this file except in compliance
  * with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
+/*
+ * Copyright 2013 Saso Kiselkov.  All rights reserved.
+ * Copyright 2015 Toomas Soome <tsoome@me.com>
+ */
 
-/*#pragma ident	"%Z%%M%	%I%	%E% SMI"*/
-
 /*
- * SHA-256 checksum, as specified in FIPS 180-2, available at:
+ * SHA-256 and SHA-512/256 hashes, as specified in FIPS 180-4, available at:
  * http://csrc.nist.gov/cryptval
  *
- * This is a very compact implementation of SHA-256.
+ * This is a very compact implementation of SHA-256 and SHA-512/256.
  * It is designed to be simple and portable, not to be fast.
  */
 
 /*
- * The literal definitions according to FIPS180-2 would be:
+ * The literal definitions according to FIPS180-4 would be:
  *
  * 	Ch(x, y, z)     (((x) & (y)) ^ ((~(x)) & (z)))
  * 	Maj(x, y, z)    (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
  *
  * We use logical equivalents which require one less op.
  */
 #define	Ch(x, y, z)	((z) ^ ((x) & ((y) ^ (z))))
 #define	Maj(x, y, z)	(((x) & (y)) ^ ((z) & ((x) ^ (y))))
-#define	Rot32(x, s)	(((x) >> s) | ((x) << (32 - s)))
-#define	SIGMA0(x)	(Rot32(x, 2) ^ Rot32(x, 13) ^ Rot32(x, 22))
-#define	SIGMA1(x)	(Rot32(x, 6) ^ Rot32(x, 11) ^ Rot32(x, 25))
-#define	sigma0(x)	(Rot32(x, 7) ^ Rot32(x, 18) ^ ((x) >> 3))
-#define	sigma1(x)	(Rot32(x, 17) ^ Rot32(x, 19) ^ ((x) >> 10))
+#define	ROTR(x, n)	(((x) >> (n)) | ((x) << ((sizeof (x) * NBBY)-(n))))
 
+/* SHA-224/256 operations */
+#define	BIGSIGMA0_256(x)	(ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
+#define	BIGSIGMA1_256(x)	(ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
+#define	SIGMA0_256(x)		(ROTR(x, 7) ^ ROTR(x, 18) ^ ((x) >> 3))
+#define	SIGMA1_256(x)		(ROTR(x, 17) ^ ROTR(x, 19) ^ ((x) >> 10))
+
+/* SHA-384/512 operations */
+#define	BIGSIGMA0_512(x)	(ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
+#define	BIGSIGMA1_512(x)	(ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
+#define	SIGMA0_512(x)		(ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
+#define	SIGMA1_512(x)		(ROTR((x), 19) ^ ROTR((x), 61) ^ ((x) >> 6))
+
+/* SHA-256 round constants */
 static const uint32_t SHA256_K[64] = {
 	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
 	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
 	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
 	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
 	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
 	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
 	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
 	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
 	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
 	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
 	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
 	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
 	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
 	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
 	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
 	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
 };
 
+/* SHA-512 round constants */
+static const uint64_t SHA512_K[80] = {
+	0x428A2F98D728AE22ULL, 0x7137449123EF65CDULL,
+	0xB5C0FBCFEC4D3B2FULL, 0xE9B5DBA58189DBBCULL,
+	0x3956C25BF348B538ULL, 0x59F111F1B605D019ULL,
+	0x923F82A4AF194F9BULL, 0xAB1C5ED5DA6D8118ULL,
+	0xD807AA98A3030242ULL, 0x12835B0145706FBEULL,
+	0x243185BE4EE4B28CULL, 0x550C7DC3D5FFB4E2ULL,
+	0x72BE5D74F27B896FULL, 0x80DEB1FE3B1696B1ULL,
+	0x9BDC06A725C71235ULL, 0xC19BF174CF692694ULL,
+	0xE49B69C19EF14AD2ULL, 0xEFBE4786384F25E3ULL,
+	0x0FC19DC68B8CD5B5ULL, 0x240CA1CC77AC9C65ULL,
+	0x2DE92C6F592B0275ULL, 0x4A7484AA6EA6E483ULL,
+	0x5CB0A9DCBD41FBD4ULL, 0x76F988DA831153B5ULL,
+	0x983E5152EE66DFABULL, 0xA831C66D2DB43210ULL,
+	0xB00327C898FB213FULL, 0xBF597FC7BEEF0EE4ULL,
+	0xC6E00BF33DA88FC2ULL, 0xD5A79147930AA725ULL,
+	0x06CA6351E003826FULL, 0x142929670A0E6E70ULL,
+	0x27B70A8546D22FFCULL, 0x2E1B21385C26C926ULL,
+	0x4D2C6DFC5AC42AEDULL, 0x53380D139D95B3DFULL,
+	0x650A73548BAF63DEULL, 0x766A0ABB3C77B2A8ULL,
+	0x81C2C92E47EDAEE6ULL, 0x92722C851482353BULL,
+	0xA2BFE8A14CF10364ULL, 0xA81A664BBC423001ULL,
+	0xC24B8B70D0F89791ULL, 0xC76C51A30654BE30ULL,
+	0xD192E819D6EF5218ULL, 0xD69906245565A910ULL,
+	0xF40E35855771202AULL, 0x106AA07032BBD1B8ULL,
+	0x19A4C116B8D2D0C8ULL, 0x1E376C085141AB53ULL,
+	0x2748774CDF8EEB99ULL, 0x34B0BCB5E19B48A8ULL,
+	0x391C0CB3C5C95A63ULL, 0x4ED8AA4AE3418ACBULL,
+	0x5B9CCA4F7763E373ULL, 0x682E6FF3D6B2B8A3ULL,
+	0x748F82EE5DEFB2FCULL, 0x78A5636F43172F60ULL,
+	0x84C87814A1F0AB72ULL, 0x8CC702081A6439ECULL,
+	0x90BEFFFA23631E28ULL, 0xA4506CEBDE82BDE9ULL,
+	0xBEF9A3F7B2C67915ULL, 0xC67178F2E372532BULL,
+	0xCA273ECEEA26619CULL, 0xD186B8C721C0C207ULL,
+	0xEADA7DD6CDE0EB1EULL, 0xF57D4F7FEE6ED178ULL,
+	0x06F067AA72176FBAULL, 0x0A637DC5A2C898A6ULL,
+	0x113F9804BEF90DAEULL, 0x1B710B35131C471BULL,
+	0x28DB77F523047D84ULL, 0x32CAAB7B40C72493ULL,
+	0x3C9EBE0A15C9BEBCULL, 0x431D67C49C100D4CULL,
+	0x4CC5D4BECB3E42B6ULL, 0x597F299CFC657E2AULL,
+	0x5FCB6FAB3AD6FAECULL, 0x6C44198C4A475817ULL
+};
+
 static void
 SHA256Transform(uint32_t *H, const uint8_t *cp)
 {
 	uint32_t a, b, c, d, e, f, g, h, t, T1, T2, W[64];
 
-	for (t = 0; t < 16; t++, cp += 4)
+	/* copy chunk into the first 16 words of the message schedule */
+	for (t = 0; t < 16; t++, cp += sizeof (uint32_t))
 		W[t] = (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | cp[3];
 
+	/* extend the first 16 words into the remaining 48 words */
 	for (t = 16; t < 64; t++)
-		W[t] = sigma1(W[t - 2]) + W[t - 7] +
-		    sigma0(W[t - 15]) + W[t - 16];
+		W[t] = SIGMA1_256(W[t - 2]) + W[t - 7] +
+		    SIGMA0_256(W[t - 15]) + W[t - 16];
 
+	/* init working variables to the current hash value */
 	a = H[0]; b = H[1]; c = H[2]; d = H[3];
 	e = H[4]; f = H[5]; g = H[6]; h = H[7];
 
+	/* iterate the compression function for all rounds of the hash */
 	for (t = 0; t < 64; t++) {
-		T1 = h + SIGMA1(e) + Ch(e, f, g) + SHA256_K[t] + W[t];
-		T2 = SIGMA0(a) + Maj(a, b, c);
+		T1 = h + BIGSIGMA1_256(e) + Ch(e, f, g) + SHA256_K[t] + W[t];
+		T2 = BIGSIGMA0_256(a) + Maj(a, b, c);
 		h = g; g = f; f = e; e = d + T1;
 		d = c; c = b; b = a; a = T1 + T2;
 	}
 
+	/* add the compressed chunk to the current hash value */
 	H[0] += a; H[1] += b; H[2] += c; H[3] += d;
 	H[4] += e; H[5] += f; H[6] += g; H[7] += h;
 }
 
 static void
-zio_checksum_SHA256(const void *buf, uint64_t size, zio_cksum_t *zcp)
+SHA512Transform(uint64_t *H, const uint8_t *cp)
 {
-	uint32_t H[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
-	    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
+	uint64_t a, b, c, d, e, f, g, h, t, T1, T2, W[80];
+
+	/* copy chunk into the first 16 words of the message schedule */
+	for (t = 0; t < 16; t++, cp += sizeof (uint64_t))
+		W[t] = ((uint64_t)cp[0] << 56) | ((uint64_t)cp[1] << 48) |
+		    ((uint64_t)cp[2] << 40) | ((uint64_t)cp[3] << 32) |
+		    ((uint64_t)cp[4] << 24) | ((uint64_t)cp[5] << 16) |
+		    ((uint64_t)cp[6] << 8) | (uint64_t)cp[7];
+
+	/* extend the first 16 words into the remaining 64 words */
+	for (t = 16; t < 80; t++)
+		W[t] = SIGMA1_512(W[t - 2]) + W[t - 7] +
+		    SIGMA0_512(W[t - 15]) + W[t - 16];
+
+	/* init working variables to the current hash value */
+	a = H[0]; b = H[1]; c = H[2]; d = H[3];
+	e = H[4]; f = H[5]; g = H[6]; h = H[7];
+
+	/* iterate the compression function for all rounds of the hash */
+	for (t = 0; t < 80; t++) {
+		T1 = h + BIGSIGMA1_512(e) + Ch(e, f, g) + SHA512_K[t] + W[t];
+		T2 = BIGSIGMA0_512(a) + Maj(a, b, c);
+		h = g; g = f; f = e; e = d + T1;
+		d = c; c = b; b = a; a = T1 + T2;
+	}
+
+	/* add the compressed chunk to the current hash value */
+	H[0] += a; H[1] += b; H[2] += c; H[3] += d;
+	H[4] += e; H[5] += f; H[6] += g; H[7] += h;
+}
+
+/*
+ * Implements the SHA-224 and SHA-256 hash algos - to select between them
+ * pass the appropriate initial values of 'H' and truncate the last 32 bits
+ * in case of SHA-224.
+ */
+static void
+SHA256(uint32_t *H, const void *buf, uint64_t size, zio_cksum_t *zcp)
+{
 	uint8_t pad[128];
-	int padsize = size & 63;
-	int i;
+	unsigned padsize = size & 63;
+	unsigned i, k;
 
+	/* process all blocks up to the last one */
 	for (i = 0; i < size - padsize; i += 64)
 		SHA256Transform(H, (uint8_t *)buf + i);
 
-	for (i = 0; i < padsize; i++)
-		pad[i] = ((uint8_t *)buf)[i];
+	/* process the last block and padding */
+	for (k = 0; k < padsize; k++)
+		pad[k] = ((uint8_t *)buf)[k+i];
 
 	for (pad[padsize++] = 0x80; (padsize & 63) != 56; padsize++)
 		pad[padsize] = 0;
 
 	for (i = 0; i < 8; i++)
 		pad[padsize++] = (size << 3) >> (56 - 8 * i);
 
 	for (i = 0; i < padsize; i += 64)
 		SHA256Transform(H, pad + i);
 
 	ZIO_SET_CHECKSUM(zcp,
 	    (uint64_t)H[0] << 32 | H[1],
 	    (uint64_t)H[2] << 32 | H[3],
 	    (uint64_t)H[4] << 32 | H[5],
 	    (uint64_t)H[6] << 32 | H[7]);
+}
+
+/*
+ * encode 64bit data in big-endian format.
+ */
+static void
+Encode64(uint8_t *output, uint64_t *input, size_t len)
+{
+	size_t i, j;
+	for (i = 0, j = 0; j < len; i++, j += 8) {
+		output[j]	= (input[i] >> 56) & 0xff;
+		output[j + 1]	= (input[i] >> 48) & 0xff;
+		output[j + 2]	= (input[i] >> 40) & 0xff;
+		output[j + 3]	= (input[i] >> 32) & 0xff;
+		output[j + 4]	= (input[i] >> 24) & 0xff;
+		output[j + 5]	= (input[i] >> 16) & 0xff;
+		output[j + 6]	= (input[i] >>  8) & 0xff;
+		output[j + 7]	= input[i] & 0xff;
+	}
+}
+
+/*
+ * Implements the SHA-384, SHA-512 and SHA-512/t hash algos - to select
+ * between them pass the appropriate initial values for 'H'. The output
+ * of this function is truncated to the first 256 bits that fit into 'zcp'.
+ */
+static void
+SHA512(uint64_t *H, const void *buf, uint64_t size, zio_cksum_t *zcp)
+{
+	uint64_t	c64[2];
+	uint8_t		pad[256];
+	unsigned	padsize = size & 127;
+	unsigned	i, k;
+
+	/* process all blocks up to the last one */
+	for (i = 0; i < size - padsize; i += 128)
+		SHA512Transform(H, (uint8_t *)buf + i);
+
+	/* process the last block and padding */
+	for (k = 0; k < padsize; k++)
+		pad[k] = ((uint8_t *)buf)[k+i];
+
+	if (padsize < 112) {
+		for (pad[padsize++] = 0x80; padsize < 112; padsize++)
+			pad[padsize] = 0;
+	} else {
+		for (pad[padsize++] = 0x80; padsize < 240; padsize++)
+			pad[padsize] = 0;
+	}
+
+	c64[0] = 0;
+	c64[1] = size << 3;
+	Encode64(pad+padsize, c64, sizeof (c64));
+	padsize += sizeof (c64);
+
+	for (i = 0; i < padsize; i += 128)
+		SHA512Transform(H, pad + i);
+
+	/* truncate the output to the first 256 bits which fit into 'zcp' */
+	Encode64((uint8_t *)zcp, H, sizeof (uint64_t) * 4);
+}
+
+static void
+zio_checksum_SHA256(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
+{
+	/* SHA-256 as per FIPS 180-4. */
+	uint32_t	H[] = {
+		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
+	};
+	SHA256(H, buf, size, zcp);
+}
+
+static void
+zio_checksum_SHA512_native(const void *buf, uint64_t size,
+    const void *ctx_template __unused, zio_cksum_t *zcp)
+{
+	/* SHA-512/256 as per FIPS 180-4. */
+	uint64_t	H[] = {
+		0x22312194FC2BF72CULL, 0x9F555FA3C84C64C2ULL,
+		0x2393B86B6F53B151ULL, 0x963877195940EABDULL,
+		0x96283EE2A88EFFE3ULL, 0xBE5E1E2553863992ULL,
+		0x2B0199FC2C85B8AAULL, 0x0EB72DDC81C52CA2ULL
+	};
+	SHA512(H, buf, size, zcp);
+}
+
+static void
+zio_checksum_SHA512_byteswap(const void *buf, uint64_t size,
+    const void *ctx_template, zio_cksum_t *zcp)
+{
+	zio_cksum_t	tmp;
+
+	zio_checksum_SHA512_native(buf, size, ctx_template, &tmp);
+	zcp->zc_word[0] = BSWAP_64(tmp.zc_word[0]);
+	zcp->zc_word[1] = BSWAP_64(tmp.zc_word[1]);
+	zcp->zc_word[2] = BSWAP_64(tmp.zc_word[2]);
+	zcp->zc_word[3] = BSWAP_64(tmp.zc_word[3]);
 }
Index: head/sys/cddl/boot/zfs/skein_zfs.c
===================================================================
--- head/sys/cddl/boot/zfs/skein_zfs.c	(nonexistent)
+++ head/sys/cddl/boot/zfs/skein_zfs.c	(revision 304321)
@@ -0,0 +1,92 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://opensource.org/licenses/CDDL-1.0.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2013 Saso Kiselkov.  All rights reserved.
+ */
+#include <skein.h>
+
+/*
+ * Computes a native 256-bit skein MAC checksum. Please note that this
+ * function requires the presence of a ctx_template that should be allocated
+ * using zio_checksum_skein_tmpl_init.
+ */
+/*ARGSUSED*/
+static void
+zio_checksum_skein_native(const void *buf, uint64_t size,
+    const void *ctx_template, zio_cksum_t *zcp)
+{
+	Skein_512_Ctxt_t	ctx;
+
+	ASSERT(ctx_template != NULL);
+	bcopy(ctx_template, &ctx, sizeof (ctx));
+	(void) Skein_512_Update(&ctx, buf, size);
+	(void) Skein_512_Final(&ctx, (uint8_t *)zcp);
+	bzero(&ctx, sizeof (ctx));
+}
+
+/*
+ * Byteswapped version of zio_checksum_skein_native. This just invokes
+ * the native checksum function and byteswaps the resulting checksum (since
+ * skein is internally endian-insensitive).
+ */
+static void
+zio_checksum_skein_byteswap(const void *buf, uint64_t size,
+    const void *ctx_template, zio_cksum_t *zcp)
+{
+	zio_cksum_t	tmp;
+
+	zio_checksum_skein_native(buf, size, ctx_template, &tmp);
+	zcp->zc_word[0] = BSWAP_64(tmp.zc_word[0]);
+	zcp->zc_word[1] = BSWAP_64(tmp.zc_word[1]);
+	zcp->zc_word[2] = BSWAP_64(tmp.zc_word[2]);
+	zcp->zc_word[3] = BSWAP_64(tmp.zc_word[3]);
+}
+
+/*
+ * Allocates a skein MAC template suitable for using in skein MAC checksum
+ * computations and returns a pointer to it.
+ */
+static void *
+zio_checksum_skein_tmpl_init(const zio_cksum_salt_t *salt)
+{
+	Skein_512_Ctxt_t	*ctx;
+
+	ctx = malloc(sizeof (*ctx));
+	bzero(ctx, sizeof (*ctx));
+	(void) Skein_512_InitExt(ctx, sizeof (zio_cksum_t) * 8, 0,
+	    salt->zcs_bytes, sizeof (salt->zcs_bytes));
+	return (ctx);
+}
+
+/*
+ * Frees a skein context template previously allocated using
+ * zio_checksum_skein_tmpl_init.
+ */
+static void
+zio_checksum_skein_tmpl_free(void *ctx_template)
+{
+	Skein_512_Ctxt_t	*ctx = ctx_template;
+
+	bzero(ctx, sizeof (*ctx));
+	free(ctx);
+}

Property changes on: head/sys/cddl/boot/zfs/skein_zfs.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: head/sys/cddl/boot/zfs/zfsimpl.h
===================================================================
--- head/sys/cddl/boot/zfs/zfsimpl.h	(revision 304320)
+++ head/sys/cddl/boot/zfs/zfsimpl.h	(revision 304321)
@@ -1,1505 +1,1520 @@
 /*-
  * Copyright (c) 2002 McAfee, Inc.
  * All rights reserved.
  *
  * This software was developed for the FreeBSD Project by Marshall
  * Kirk McKusick and McAfee Research,, the Security Research Division of
  * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
  * part of the DARPA CHATS research program
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 /*
  * Copyright 2013 by Saso Kiselkov. All rights reserved.
  */
 /*
  * Copyright (c) 2013 by Delphix. All rights reserved.
  */
 
 #define	MAXNAMELEN	256
 
 #define _NOTE(s)
 
 typedef enum { B_FALSE, B_TRUE } boolean_t;
 
 /* CRC64 table */
 #define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
 
 /*
  * Macros for various sorts of alignment and rounding when the alignment
  * is known to be a power of 2.
  */
 #define	P2ALIGN(x, align)		((x) & -(align))
 #define	P2PHASE(x, align)		((x) & ((align) - 1))
 #define	P2NPHASE(x, align)		(-(x) & ((align) - 1))
 #define	P2ROUNDUP(x, align)		(-(-(x) & -(align)))
 #define	P2END(x, align)			(-(~(x) & -(align)))
 #define	P2PHASEUP(x, align, phase)	((phase) - (((phase) - (x)) & -(align)))
 #define	P2BOUNDARY(off, len, align)	(((off) ^ ((off) + (len) - 1)) > (align) - 1)
 
 /*
  * General-purpose 32-bit and 64-bit bitfield encodings.
  */
 #define	BF32_DECODE(x, low, len)	P2PHASE((x) >> (low), 1U << (len))
 #define	BF64_DECODE(x, low, len)	P2PHASE((x) >> (low), 1ULL << (len))
 #define	BF32_ENCODE(x, low, len)	(P2PHASE((x), 1U << (len)) << (low))
 #define	BF64_ENCODE(x, low, len)	(P2PHASE((x), 1ULL << (len)) << (low))
 
 #define	BF32_GET(x, low, len)		BF32_DECODE(x, low, len)
 #define	BF64_GET(x, low, len)		BF64_DECODE(x, low, len)
 
 #define	BF32_SET(x, low, len, val)	\
 	((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
 #define	BF64_SET(x, low, len, val)	\
 	((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))
 
 #define	BF32_GET_SB(x, low, len, shift, bias)	\
 	((BF32_GET(x, low, len) + (bias)) << (shift))
 #define	BF64_GET_SB(x, low, len, shift, bias)	\
 	((BF64_GET(x, low, len) + (bias)) << (shift))
 
 #define	BF32_SET_SB(x, low, len, shift, bias, val)	\
 	BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
 #define	BF64_SET_SB(x, low, len, shift, bias, val)	\
 	BF64_SET(x, low, len, ((val) >> (shift)) - (bias))
 
 /*
  * Macros to reverse byte order
  */
 #define	BSWAP_8(x)	((x) & 0xff)
 #define	BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
 #define	BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
 #define	BSWAP_64(x)	((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
 
-/*
- * Note: the boot loader can't actually read blocks larger than 128KB,
- * due to lack of memory.  Therefore its SPA_MAXBLOCKSIZE is still 128KB.
- */
 #define	SPA_MINBLOCKSHIFT	9
-#define	SPA_MAXBLOCKSHIFT	17
+#define	SPA_OLDMAXBLOCKSHIFT	17
+#define	SPA_MAXBLOCKSHIFT	24
 #define	SPA_MINBLOCKSIZE	(1ULL << SPA_MINBLOCKSHIFT)
+#define	SPA_OLDMAXBLOCKSIZE	(1ULL << SPA_OLDMAXBLOCKSHIFT)
 #define	SPA_MAXBLOCKSIZE	(1ULL << SPA_MAXBLOCKSHIFT)
 
 /*
  * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
  * The ASIZE encoding should be at least 64 times larger (6 more bits)
  * to support up to 4-way RAID-Z mirror mode with worst-case gang block
  * overhead, three DVAs per bp, plus one more bit in case we do anything
  * else that expands the ASIZE.
  */
 #define	SPA_LSIZEBITS		16	/* LSIZE up to 32M (2^16 * 512)	*/
 #define	SPA_PSIZEBITS		16	/* PSIZE up to 32M (2^16 * 512)	*/
 #define	SPA_ASIZEBITS		24	/* ASIZE up to 64 times larger	*/
 
 /*
  * All SPA data is represented by 128-bit data virtual addresses (DVAs).
  * The members of the dva_t should be considered opaque outside the SPA.
  */
 typedef struct dva {
 	uint64_t	dva_word[2];
 } dva_t;
 
 /*
  * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
  */
 typedef struct zio_cksum {
 	uint64_t	zc_word[4];
 } zio_cksum_t;
 
 /*
+ * Some checksums/hashes need a 256-bit initialization salt. This salt is kept
+ * secret and is suitable for use in MAC algorithms as the key.
+ */
+typedef struct zio_cksum_salt {
+	uint8_t		zcs_bytes[32];
+} zio_cksum_salt_t;
+
+/*
  * Each block is described by its DVAs, time of birth, checksum, etc.
  * The word-by-word, bit-by-bit layout of the blkptr is as follows:
  *
  *	64	56	48	40	32	24	16	8	0
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 0	|		vdev1		| GRID  |	  ASIZE		|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 1	|G|			 offset1				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 2	|		vdev2		| GRID  |	  ASIZE		|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 3	|G|			 offset2				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 4	|		vdev3		| GRID  |	  ASIZE		|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 5	|G|			 offset3				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 6	|BDX|lvl| type	| cksum |E| comp|    PSIZE	|     LSIZE	|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 7	|			padding					|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 8	|			padding					|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 9	|			physical birth txg			|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * a	|			logical birth txg			|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * b	|			fill count				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * c	|			checksum[0]				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * d	|			checksum[1]				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * e	|			checksum[2]				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * f	|			checksum[3]				|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  *
  * Legend:
  *
  * vdev		virtual device ID
  * offset	offset into virtual device
  * LSIZE	logical size
  * PSIZE	physical size (after compression)
  * ASIZE	allocated size (including RAID-Z parity and gang block headers)
  * GRID		RAID-Z layout information (reserved for future use)
  * cksum	checksum function
  * comp		compression function
  * G		gang block indicator
  * B		byteorder (endianness)
  * D		dedup
  * X		encryption (on version 30, which is not supported)
  * E		blkptr_t contains embedded data (see below)
  * lvl		level of indirection
  * type		DMU object type
  * phys birth	txg of block allocation; zero if same as logical birth txg
  * log. birth	transaction group in which the block was logically born
  * fill count	number of non-zero blocks under this bp
  * checksum[4]	256-bit checksum of the data this bp describes
  */
 
 /*
  * "Embedded" blkptr_t's don't actually point to a block, instead they
  * have a data payload embedded in the blkptr_t itself.  See the comment
  * in blkptr.c for more details.
  *
  * The blkptr_t is laid out as follows:
  *
  *	64	56	48	40	32	24	16	8	0
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 0	|      payload                                                  |
  * 1	|      payload                                                  |
  * 2	|      payload                                                  |
  * 3	|      payload                                                  |
  * 4	|      payload                                                  |
  * 5	|      payload                                                  |
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 6	|BDX|lvl| type	| etype |E| comp| PSIZE|              LSIZE	|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * 7	|      payload                                                  |
  * 8	|      payload                                                  |
  * 9	|      payload                                                  |
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * a	|			logical birth txg			|
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  * b	|      payload                                                  |
  * c	|      payload                                                  |
  * d	|      payload                                                  |
  * e	|      payload                                                  |
  * f	|      payload                                                  |
  *	+-------+-------+-------+-------+-------+-------+-------+-------+
  *
  * Legend:
  *
  * payload		contains the embedded data
  * B (byteorder)	byteorder (endianness)
  * D (dedup)		padding (set to zero)
  * X			encryption (set to zero; see above)
  * E (embedded)		set to one
  * lvl			indirection level
  * type			DMU object type
  * etype		how to interpret embedded data (BP_EMBEDDED_TYPE_*)
  * comp			compression function of payload
  * PSIZE		size of payload after compression, in bytes
  * LSIZE		logical size of payload, in bytes
  *			note that 25 bits is enough to store the largest
  *			"normal" BP's LSIZE (2^16 * 2^9) in bytes
  * log. birth		transaction group in which the block was logically born
  *
  * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
  * bp's they are stored in units of SPA_MINBLOCKSHIFT.
  * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
  * The B, D, X, lvl, type, and comp fields are stored the same as with normal
  * BP's so the BP_SET_* macros can be used with them.  etype, PSIZE, LSIZE must
  * be set with the BPE_SET_* macros.  BP_SET_EMBEDDED() should be called before
  * other macros, as they assert that they are only used on BP's of the correct
  * "embedded-ness".
  */
 
 #define	BPE_GET_ETYPE(bp)	\
 	(ASSERT(BP_IS_EMBEDDED(bp)), \
 	BF64_GET((bp)->blk_prop, 40, 8))
 #define	BPE_SET_ETYPE(bp, t)	do { \
 	ASSERT(BP_IS_EMBEDDED(bp)); \
 	BF64_SET((bp)->blk_prop, 40, 8, t); \
 _NOTE(CONSTCOND) } while (0)
 
 #define	BPE_GET_LSIZE(bp)	\
 	(ASSERT(BP_IS_EMBEDDED(bp)), \
 	BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
 #define	BPE_SET_LSIZE(bp, x)	do { \
 	ASSERT(BP_IS_EMBEDDED(bp)); \
 	BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
 _NOTE(CONSTCOND) } while (0)
 
 #define	BPE_GET_PSIZE(bp)	\
 	(ASSERT(BP_IS_EMBEDDED(bp)), \
 	BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
 #define	BPE_SET_PSIZE(bp, x)	do { \
 	ASSERT(BP_IS_EMBEDDED(bp)); \
 	BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
 _NOTE(CONSTCOND) } while (0)
 
 typedef enum bp_embedded_type {
 	BP_EMBEDDED_TYPE_DATA,
 	BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
 	NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
 } bp_embedded_type_t;
 
 #define	BPE_NUM_WORDS 14
 #define	BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
 #define	BPE_IS_PAYLOADWORD(bp, wp) \
 	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
 
 #define	SPA_BLKPTRSHIFT	7		/* blkptr_t is 128 bytes	*/
 #define	SPA_DVAS_PER_BP	3		/* Number of DVAs in a bp	*/
 
 typedef struct blkptr {
 	dva_t		blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
 	uint64_t	blk_prop;	/* size, compression, type, etc	    */
 	uint64_t	blk_pad[2];	/* Extra space for the future	    */
 	uint64_t	blk_phys_birth;	/* txg when block was allocated	    */
 	uint64_t	blk_birth;	/* transaction group at birth	    */
 	uint64_t	blk_fill;	/* fill count			    */
 	zio_cksum_t	blk_cksum;	/* 256-bit checksum		    */
 } blkptr_t;
 
 /*
  * Macros to get and set fields in a bp or DVA.
  */
 #define	DVA_GET_ASIZE(dva)	\
 	BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
 #define	DVA_SET_ASIZE(dva, x)	\
 	BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
 	SPA_MINBLOCKSHIFT, 0, x)
 
 #define	DVA_GET_GRID(dva)	BF64_GET((dva)->dva_word[0], 24, 8)
 #define	DVA_SET_GRID(dva, x)	BF64_SET((dva)->dva_word[0], 24, 8, x)
 
 #define	DVA_GET_VDEV(dva)	BF64_GET((dva)->dva_word[0], 32, 32)
 #define	DVA_SET_VDEV(dva, x)	BF64_SET((dva)->dva_word[0], 32, 32, x)
 
 #define	DVA_GET_OFFSET(dva)	\
 	BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
 #define	DVA_SET_OFFSET(dva, x)	\
 	BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
 
 #define	DVA_GET_GANG(dva)	BF64_GET((dva)->dva_word[1], 63, 1)
 #define	DVA_SET_GANG(dva, x)	BF64_SET((dva)->dva_word[1], 63, 1, x)
 
 #define	BP_GET_LSIZE(bp)	\
 	(BP_IS_EMBEDDED(bp) ?	\
 	(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
 	BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
 #define	BP_SET_LSIZE(bp, x)	do { \
 	ASSERT(!BP_IS_EMBEDDED(bp)); \
 	BF64_SET_SB((bp)->blk_prop, \
 	    0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
 _NOTE(CONSTCOND) } while (0)
 
 #define	BP_GET_PSIZE(bp)	\
 	BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
 #define	BP_SET_PSIZE(bp, x)	\
 	BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
 
 #define	BP_GET_COMPRESS(bp)	BF64_GET((bp)->blk_prop, 32, 7)
 #define	BP_SET_COMPRESS(bp, x)	BF64_SET((bp)->blk_prop, 32, 7, x)
 
 #define	BP_GET_CHECKSUM(bp)	BF64_GET((bp)->blk_prop, 40, 8)
 #define	BP_SET_CHECKSUM(bp, x)	BF64_SET((bp)->blk_prop, 40, 8, x)
 
 #define	BP_GET_TYPE(bp)		BF64_GET((bp)->blk_prop, 48, 8)
 #define	BP_SET_TYPE(bp, x)	BF64_SET((bp)->blk_prop, 48, 8, x)
 
 #define	BP_GET_LEVEL(bp)	BF64_GET((bp)->blk_prop, 56, 5)
 #define	BP_SET_LEVEL(bp, x)	BF64_SET((bp)->blk_prop, 56, 5, x)
 
 #define	BP_IS_EMBEDDED(bp)	BF64_GET((bp)->blk_prop, 39, 1)
 
 #define	BP_GET_DEDUP(bp)	BF64_GET((bp)->blk_prop, 62, 1)
 #define	BP_SET_DEDUP(bp, x)	BF64_SET((bp)->blk_prop, 62, 1, x)
 
 #define	BP_GET_BYTEORDER(bp)	BF64_GET((bp)->blk_prop, 63, 1)
 #define	BP_SET_BYTEORDER(bp, x)	BF64_SET((bp)->blk_prop, 63, 1, x)
 
 #define	BP_PHYSICAL_BIRTH(bp)		\
 	((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
 
 #define	BP_GET_ASIZE(bp)	\
 	(DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
 		DVA_GET_ASIZE(&(bp)->blk_dva[2]))
 
 #define	BP_GET_UCSIZE(bp) \
 	((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
 	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp));
 
 #define	BP_GET_NDVAS(bp)	\
 	(!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
 	!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
 	!!DVA_GET_ASIZE(&(bp)->blk_dva[2]))
 
 #define	DVA_EQUAL(dva1, dva2)	\
 	((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
 	(dva1)->dva_word[0] == (dva2)->dva_word[0])
 
 #define	ZIO_CHECKSUM_EQUAL(zc1, zc2) \
 	(0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
 	((zc1).zc_word[1] - (zc2).zc_word[1]) | \
 	((zc1).zc_word[2] - (zc2).zc_word[2]) | \
 	((zc1).zc_word[3] - (zc2).zc_word[3])))
 
 
 #define	DVA_IS_VALID(dva)	(DVA_GET_ASIZE(dva) != 0)
 
 #define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
 {						\
 	(zcp)->zc_word[0] = w0;			\
 	(zcp)->zc_word[1] = w1;			\
 	(zcp)->zc_word[2] = w2;			\
 	(zcp)->zc_word[3] = w3;			\
 }
 
 #define	BP_IDENTITY(bp)		(&(bp)->blk_dva[0])
 #define	BP_IS_GANG(bp)		DVA_GET_GANG(BP_IDENTITY(bp))
 #define	DVA_IS_EMPTY(dva)	((dva)->dva_word[0] == 0ULL &&  \
 	(dva)->dva_word[1] == 0ULL)
 #define	BP_IS_HOLE(bp)		DVA_IS_EMPTY(BP_IDENTITY(bp))
 #define	BP_IS_OLDER(bp, txg)	(!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))
 
 #define	BP_ZERO(bp)				\
 {						\
 	(bp)->blk_dva[0].dva_word[0] = 0;	\
 	(bp)->blk_dva[0].dva_word[1] = 0;	\
 	(bp)->blk_dva[1].dva_word[0] = 0;	\
 	(bp)->blk_dva[1].dva_word[1] = 0;	\
 	(bp)->blk_dva[2].dva_word[0] = 0;	\
 	(bp)->blk_dva[2].dva_word[1] = 0;	\
 	(bp)->blk_prop = 0;			\
 	(bp)->blk_pad[0] = 0;			\
 	(bp)->blk_pad[1] = 0;			\
 	(bp)->blk_phys_birth = 0;		\
 	(bp)->blk_birth = 0;			\
 	(bp)->blk_fill = 0;			\
 	ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0);	\
 }
 
 #define	BPE_NUM_WORDS 14
 #define	BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
 #define	BPE_IS_PAYLOADWORD(bp, wp) \
 	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
 
 /*
  * Embedded checksum
  */
 #define	ZEC_MAGIC	0x210da7ab10c7a11ULL
 
 typedef struct zio_eck {
 	uint64_t	zec_magic;	/* for validation, endianness	*/
 	zio_cksum_t	zec_cksum;	/* 256-bit checksum		*/
 } zio_eck_t;
 
 /*
  * Gang block headers are self-checksumming and contain an array
  * of block pointers.
  */
 #define	SPA_GANGBLOCKSIZE	SPA_MINBLOCKSIZE
 #define	SPA_GBH_NBLKPTRS	((SPA_GANGBLOCKSIZE - \
 	sizeof (zio_eck_t)) / sizeof (blkptr_t))
 #define	SPA_GBH_FILLER		((SPA_GANGBLOCKSIZE - \
 	sizeof (zio_eck_t) - \
 	(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
 	sizeof (uint64_t))
 
 typedef struct zio_gbh {
 	blkptr_t		zg_blkptr[SPA_GBH_NBLKPTRS];
 	uint64_t		zg_filler[SPA_GBH_FILLER];
 	zio_eck_t		zg_tail;
 } zio_gbh_phys_t;
 
 #define	VDEV_RAIDZ_MAXPARITY	3
 
 #define	VDEV_PAD_SIZE		(8 << 10)
 /* 2 padding areas (vl_pad1 and vl_pad2) to skip */
 #define	VDEV_SKIP_SIZE		VDEV_PAD_SIZE * 2
 #define	VDEV_PHYS_SIZE		(112 << 10)
 #define	VDEV_UBERBLOCK_RING	(128 << 10)
 
 #define	VDEV_UBERBLOCK_SHIFT(vd)	\
 	MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT)
 #define	VDEV_UBERBLOCK_COUNT(vd)	\
 	(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
 #define	VDEV_UBERBLOCK_OFFSET(vd, n)	\
 	offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
 #define	VDEV_UBERBLOCK_SIZE(vd)		(1ULL << VDEV_UBERBLOCK_SHIFT(vd))
 
 typedef struct vdev_phys {
 	char		vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
 	zio_eck_t	vp_zbt;
 } vdev_phys_t;
 
 typedef struct vdev_label {
 	char		vl_pad1[VDEV_PAD_SIZE];			/*  8K  */
 	char		vl_pad2[VDEV_PAD_SIZE];			/*  8K  */
 	vdev_phys_t	vl_vdev_phys;				/* 112K	*/
 	char		vl_uberblock[VDEV_UBERBLOCK_RING];	/* 128K	*/
 } vdev_label_t;							/* 256K total */
 
 /*
  * vdev_dirty() flags
  */
 #define	VDD_METASLAB	0x01
 #define	VDD_DTL		0x02
 
 /*
  * Size and offset of embedded boot loader region on each label.
  * The total size of the first two labels plus the boot area is 4MB.
  */
 #define	VDEV_BOOT_OFFSET	(2 * sizeof (vdev_label_t))
 #define	VDEV_BOOT_SIZE		(7ULL << 19)			/* 3.5M	*/
 
 /*
  * Size of label regions at the start and end of each leaf device.
  */
 #define	VDEV_LABEL_START_SIZE	(2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
 #define	VDEV_LABEL_END_SIZE	(2 * sizeof (vdev_label_t))
 #define	VDEV_LABELS		4
 
 enum zio_checksum {
 	ZIO_CHECKSUM_INHERIT = 0,
 	ZIO_CHECKSUM_ON,
 	ZIO_CHECKSUM_OFF,
 	ZIO_CHECKSUM_LABEL,
 	ZIO_CHECKSUM_GANG_HEADER,
 	ZIO_CHECKSUM_ZILOG,
 	ZIO_CHECKSUM_FLETCHER_2,
 	ZIO_CHECKSUM_FLETCHER_4,
 	ZIO_CHECKSUM_SHA256,
 	ZIO_CHECKSUM_ZILOG2,
+	ZIO_CHECKSUM_NOPARITY,
+	ZIO_CHECKSUM_SHA512,
+	ZIO_CHECKSUM_SKEIN,
+	ZIO_CHECKSUM_EDONR,
 	ZIO_CHECKSUM_FUNCTIONS
 };
 
 #define	ZIO_CHECKSUM_ON_VALUE	ZIO_CHECKSUM_FLETCHER_4
 #define	ZIO_CHECKSUM_DEFAULT	ZIO_CHECKSUM_ON
 
 enum zio_compress {
 	ZIO_COMPRESS_INHERIT = 0,
 	ZIO_COMPRESS_ON,
 	ZIO_COMPRESS_OFF,
 	ZIO_COMPRESS_LZJB,
 	ZIO_COMPRESS_EMPTY,
 	ZIO_COMPRESS_GZIP_1,
 	ZIO_COMPRESS_GZIP_2,
 	ZIO_COMPRESS_GZIP_3,
 	ZIO_COMPRESS_GZIP_4,
 	ZIO_COMPRESS_GZIP_5,
 	ZIO_COMPRESS_GZIP_6,
 	ZIO_COMPRESS_GZIP_7,
 	ZIO_COMPRESS_GZIP_8,
 	ZIO_COMPRESS_GZIP_9,
 	ZIO_COMPRESS_ZLE,
 	ZIO_COMPRESS_LZ4,
 	ZIO_COMPRESS_FUNCTIONS
 };
 
 #define	ZIO_COMPRESS_ON_VALUE	ZIO_COMPRESS_LZJB
 #define	ZIO_COMPRESS_DEFAULT	ZIO_COMPRESS_OFF
 
 /* nvlist pack encoding */
 #define	NV_ENCODE_NATIVE	0
 #define	NV_ENCODE_XDR		1
 
 typedef enum {
 	DATA_TYPE_UNKNOWN = 0,
 	DATA_TYPE_BOOLEAN,
 	DATA_TYPE_BYTE,
 	DATA_TYPE_INT16,
 	DATA_TYPE_UINT16,
 	DATA_TYPE_INT32,
 	DATA_TYPE_UINT32,
 	DATA_TYPE_INT64,
 	DATA_TYPE_UINT64,
 	DATA_TYPE_STRING,
 	DATA_TYPE_BYTE_ARRAY,
 	DATA_TYPE_INT16_ARRAY,
 	DATA_TYPE_UINT16_ARRAY,
 	DATA_TYPE_INT32_ARRAY,
 	DATA_TYPE_UINT32_ARRAY,
 	DATA_TYPE_INT64_ARRAY,
 	DATA_TYPE_UINT64_ARRAY,
 	DATA_TYPE_STRING_ARRAY,
 	DATA_TYPE_HRTIME,
 	DATA_TYPE_NVLIST,
 	DATA_TYPE_NVLIST_ARRAY,
 	DATA_TYPE_BOOLEAN_VALUE,
 	DATA_TYPE_INT8,
 	DATA_TYPE_UINT8,
 	DATA_TYPE_BOOLEAN_ARRAY,
 	DATA_TYPE_INT8_ARRAY,
 	DATA_TYPE_UINT8_ARRAY
 } data_type_t;
 
 /*
  * On-disk version number.
  */
 #define	SPA_VERSION_1			1ULL
 #define	SPA_VERSION_2			2ULL
 #define	SPA_VERSION_3			3ULL
 #define	SPA_VERSION_4			4ULL
 #define	SPA_VERSION_5			5ULL
 #define	SPA_VERSION_6			6ULL
 #define	SPA_VERSION_7			7ULL
 #define	SPA_VERSION_8			8ULL
 #define	SPA_VERSION_9			9ULL
 #define	SPA_VERSION_10			10ULL
 #define	SPA_VERSION_11			11ULL
 #define	SPA_VERSION_12			12ULL
 #define	SPA_VERSION_13			13ULL
 #define	SPA_VERSION_14			14ULL
 #define	SPA_VERSION_15			15ULL
 #define	SPA_VERSION_16			16ULL
 #define	SPA_VERSION_17			17ULL
 #define	SPA_VERSION_18			18ULL
 #define	SPA_VERSION_19			19ULL
 #define	SPA_VERSION_20			20ULL
 #define	SPA_VERSION_21			21ULL
 #define	SPA_VERSION_22			22ULL
 #define	SPA_VERSION_23			23ULL
 #define	SPA_VERSION_24			24ULL
 #define	SPA_VERSION_25			25ULL
 #define	SPA_VERSION_26			26ULL
 #define	SPA_VERSION_27			27ULL
 #define	SPA_VERSION_28			28ULL
 #define	SPA_VERSION_5000		5000ULL
 
 /*
  * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
  * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
  * and do the appropriate changes.  Also bump the version number in
  * usr/src/grub/capability.
  */
 #define	SPA_VERSION			SPA_VERSION_5000
 #define	SPA_VERSION_STRING		"5000"
 
 /*
  * Symbolic names for the changes that caused a SPA_VERSION switch.
  * Used in the code when checking for presence or absence of a feature.
  * Feel free to define multiple symbolic names for each version if there
  * were multiple changes to on-disk structures during that version.
  *
  * NOTE: When checking the current SPA_VERSION in your code, be sure
  *       to use spa_version() since it reports the version of the
  *       last synced uberblock.  Checking the in-flight version can
  *       be dangerous in some cases.
  */
 #define	SPA_VERSION_INITIAL		SPA_VERSION_1
 #define	SPA_VERSION_DITTO_BLOCKS	SPA_VERSION_2
 #define	SPA_VERSION_SPARES		SPA_VERSION_3
 #define	SPA_VERSION_RAID6		SPA_VERSION_3
 #define	SPA_VERSION_BPLIST_ACCOUNT	SPA_VERSION_3
 #define	SPA_VERSION_RAIDZ_DEFLATE	SPA_VERSION_3
 #define	SPA_VERSION_DNODE_BYTES		SPA_VERSION_3
 #define	SPA_VERSION_ZPOOL_HISTORY	SPA_VERSION_4
 #define	SPA_VERSION_GZIP_COMPRESSION	SPA_VERSION_5
 #define	SPA_VERSION_BOOTFS		SPA_VERSION_6
 #define	SPA_VERSION_SLOGS		SPA_VERSION_7
 #define	SPA_VERSION_DELEGATED_PERMS	SPA_VERSION_8
 #define	SPA_VERSION_FUID		SPA_VERSION_9
 #define	SPA_VERSION_REFRESERVATION	SPA_VERSION_9
 #define	SPA_VERSION_REFQUOTA		SPA_VERSION_9
 #define	SPA_VERSION_UNIQUE_ACCURATE	SPA_VERSION_9
 #define	SPA_VERSION_L2CACHE		SPA_VERSION_10
 #define	SPA_VERSION_NEXT_CLONES		SPA_VERSION_11
 #define	SPA_VERSION_ORIGIN		SPA_VERSION_11
 #define	SPA_VERSION_DSL_SCRUB		SPA_VERSION_11
 #define	SPA_VERSION_SNAP_PROPS		SPA_VERSION_12
 #define	SPA_VERSION_USED_BREAKDOWN	SPA_VERSION_13
 #define	SPA_VERSION_PASSTHROUGH_X	SPA_VERSION_14
 #define SPA_VERSION_USERSPACE		SPA_VERSION_15
 #define	SPA_VERSION_STMF_PROP		SPA_VERSION_16
 #define	SPA_VERSION_RAIDZ3		SPA_VERSION_17
 #define	SPA_VERSION_USERREFS		SPA_VERSION_18
 #define	SPA_VERSION_HOLES		SPA_VERSION_19
 #define	SPA_VERSION_ZLE_COMPRESSION	SPA_VERSION_20
 #define	SPA_VERSION_DEDUP		SPA_VERSION_21
 #define	SPA_VERSION_RECVD_PROPS		SPA_VERSION_22
 #define	SPA_VERSION_SLIM_ZIL		SPA_VERSION_23
 #define	SPA_VERSION_SA			SPA_VERSION_24
 #define	SPA_VERSION_SCAN		SPA_VERSION_25
 #define	SPA_VERSION_DIR_CLONES		SPA_VERSION_26
 #define	SPA_VERSION_DEADLISTS		SPA_VERSION_26
 #define	SPA_VERSION_FAST_SNAP		SPA_VERSION_27
 #define	SPA_VERSION_MULTI_REPLACE	SPA_VERSION_28
 #define	SPA_VERSION_BEFORE_FEATURES	SPA_VERSION_28
 #define	SPA_VERSION_FEATURES		SPA_VERSION_5000
 
 #define	SPA_VERSION_IS_SUPPORTED(v) \
 	(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
 	((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
 
 /*
  * The following are configuration names used in the nvlist describing a pool's
  * configuration.
  */
 #define	ZPOOL_CONFIG_VERSION		"version"
 #define	ZPOOL_CONFIG_POOL_NAME		"name"
 #define	ZPOOL_CONFIG_POOL_STATE		"state"
 #define	ZPOOL_CONFIG_POOL_TXG		"txg"
 #define	ZPOOL_CONFIG_POOL_GUID		"pool_guid"
 #define	ZPOOL_CONFIG_CREATE_TXG		"create_txg"
 #define	ZPOOL_CONFIG_TOP_GUID		"top_guid"
 #define	ZPOOL_CONFIG_VDEV_TREE		"vdev_tree"
 #define	ZPOOL_CONFIG_TYPE		"type"
 #define	ZPOOL_CONFIG_CHILDREN		"children"
 #define	ZPOOL_CONFIG_ID			"id"
 #define	ZPOOL_CONFIG_GUID		"guid"
 #define	ZPOOL_CONFIG_PATH		"path"
 #define	ZPOOL_CONFIG_DEVID		"devid"
 #define	ZPOOL_CONFIG_METASLAB_ARRAY	"metaslab_array"
 #define	ZPOOL_CONFIG_METASLAB_SHIFT	"metaslab_shift"
 #define	ZPOOL_CONFIG_ASHIFT		"ashift"
 #define	ZPOOL_CONFIG_ASIZE		"asize"
 #define	ZPOOL_CONFIG_DTL		"DTL"
 #define	ZPOOL_CONFIG_STATS		"stats"
 #define	ZPOOL_CONFIG_WHOLE_DISK		"whole_disk"
 #define	ZPOOL_CONFIG_ERRCOUNT		"error_count"
 #define	ZPOOL_CONFIG_NOT_PRESENT	"not_present"
 #define	ZPOOL_CONFIG_SPARES		"spares"
 #define	ZPOOL_CONFIG_IS_SPARE		"is_spare"
 #define	ZPOOL_CONFIG_NPARITY		"nparity"
 #define	ZPOOL_CONFIG_HOSTID		"hostid"
 #define	ZPOOL_CONFIG_HOSTNAME		"hostname"
 #define	ZPOOL_CONFIG_IS_LOG		"is_log"
 #define	ZPOOL_CONFIG_TIMESTAMP		"timestamp" /* not stored on disk */
 #define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"
 
 /*
  * The persistent vdev state is stored as separate values rather than a single
  * 'vdev_state' entry.  This is because a device can be in multiple states, such
  * as offline and degraded.
  */
 #define	ZPOOL_CONFIG_OFFLINE            "offline"
 #define	ZPOOL_CONFIG_FAULTED            "faulted"
 #define	ZPOOL_CONFIG_DEGRADED           "degraded"
 #define	ZPOOL_CONFIG_REMOVED            "removed"
 #define	ZPOOL_CONFIG_FRU		"fru"
 #define	ZPOOL_CONFIG_AUX_STATE		"aux_state"
 
 #define	VDEV_TYPE_ROOT			"root"
 #define	VDEV_TYPE_MIRROR		"mirror"
 #define	VDEV_TYPE_REPLACING		"replacing"
 #define	VDEV_TYPE_RAIDZ			"raidz"
 #define	VDEV_TYPE_DISK			"disk"
 #define	VDEV_TYPE_FILE			"file"
 #define	VDEV_TYPE_MISSING		"missing"
 #define	VDEV_TYPE_HOLE			"hole"
 #define	VDEV_TYPE_SPARE			"spare"
 #define	VDEV_TYPE_LOG			"log"
 #define	VDEV_TYPE_L2CACHE		"l2cache"
 
 /*
  * This is needed in userland to report the minimum necessary device size.
  */
 #define	SPA_MINDEVSIZE		(64ULL << 20)
 
 /*
  * The location of the pool configuration repository, shared between kernel and
  * userland.
  */
 #define	ZPOOL_CACHE		"/boot/zfs/zpool.cache"
 
 /*
  * vdev states are ordered from least to most healthy.
  * A vdev that's CANT_OPEN or below is considered unusable.
  */
 typedef enum vdev_state {
 	VDEV_STATE_UNKNOWN = 0,	/* Uninitialized vdev			*/
 	VDEV_STATE_CLOSED,	/* Not currently open			*/
 	VDEV_STATE_OFFLINE,	/* Not allowed to open			*/
 	VDEV_STATE_REMOVED,	/* Explicitly removed from system	*/
 	VDEV_STATE_CANT_OPEN,	/* Tried to open, but failed		*/
 	VDEV_STATE_FAULTED,	/* External request to fault device	*/
 	VDEV_STATE_DEGRADED,	/* Replicated vdev with unhealthy kids	*/
 	VDEV_STATE_HEALTHY	/* Presumed good			*/
 } vdev_state_t;
 
 /*
  * vdev aux states.  When a vdev is in the CANT_OPEN state, the aux field
  * of the vdev stats structure uses these constants to distinguish why.
  */
 typedef enum vdev_aux {
 	VDEV_AUX_NONE,		/* no error				*/
 	VDEV_AUX_OPEN_FAILED,	/* ldi_open_*() or vn_open() failed	*/
 	VDEV_AUX_CORRUPT_DATA,	/* bad label or disk contents		*/
 	VDEV_AUX_NO_REPLICAS,	/* insufficient number of replicas	*/
 	VDEV_AUX_BAD_GUID_SUM,	/* vdev guid sum doesn't match		*/
 	VDEV_AUX_TOO_SMALL,	/* vdev size is too small		*/
 	VDEV_AUX_BAD_LABEL,	/* the label is OK but invalid		*/
 	VDEV_AUX_VERSION_NEWER,	/* on-disk version is too new		*/
 	VDEV_AUX_VERSION_OLDER,	/* on-disk version is too old		*/
 	VDEV_AUX_SPARED		/* hot spare used in another pool	*/
 } vdev_aux_t;
 
 /*
  * pool state.  The following states are written to disk as part of the normal
  * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE.  The remaining states are
  * software abstractions used at various levels to communicate pool state.
  */
 typedef enum pool_state {
 	POOL_STATE_ACTIVE = 0,		/* In active use		*/
 	POOL_STATE_EXPORTED,		/* Explicitly exported		*/
 	POOL_STATE_DESTROYED,		/* Explicitly destroyed		*/
 	POOL_STATE_SPARE,		/* Reserved for hot spare use	*/
 	POOL_STATE_UNINITIALIZED,	/* Internal spa_t state		*/
 	POOL_STATE_UNAVAIL,		/* Internal libzfs state	*/
 	POOL_STATE_POTENTIALLY_ACTIVE	/* Internal libzfs state	*/
 } pool_state_t;
 
 /*
  * The uberblock version is incremented whenever an incompatible on-disk
  * format change is made to the SPA, DMU, or ZAP.
  *
  * Note: the first two fields should never be moved.  When a storage pool
  * is opened, the uberblock must be read off the disk before the version
  * can be checked.  If the ub_version field is moved, we may not detect
  * version mismatch.  If the ub_magic field is moved, applications that
  * expect the magic number in the first word won't work.
  */
 #define	UBERBLOCK_MAGIC		0x00bab10c		/* oo-ba-bloc!	*/
 #define	UBERBLOCK_SHIFT		10			/* up to 1K	*/
 
 struct uberblock {
 	uint64_t	ub_magic;	/* UBERBLOCK_MAGIC		*/
 	uint64_t	ub_version;	/* SPA_VERSION			*/
 	uint64_t	ub_txg;		/* txg of last sync		*/
 	uint64_t	ub_guid_sum;	/* sum of all vdev guids	*/
 	uint64_t	ub_timestamp;	/* UTC time of last sync	*/
 	blkptr_t	ub_rootbp;	/* MOS objset_phys_t		*/
 };
 
 /*
  * Flags.
  */
 #define	DNODE_MUST_BE_ALLOCATED	1
 #define	DNODE_MUST_BE_FREE	2
 
 /*
  * Fixed constants.
  */
 #define	DNODE_SHIFT		9	/* 512 bytes */
 #define	DN_MIN_INDBLKSHIFT	12	/* 4k */
 #define	DN_MAX_INDBLKSHIFT	14	/* 16k */
 #define	DNODE_BLOCK_SHIFT	14	/* 16k */
 #define	DNODE_CORE_SIZE		64	/* 64 bytes for dnode sans blkptrs */
 #define	DN_MAX_OBJECT_SHIFT	48	/* 256 trillion (zfs_fid_t limit) */
 #define	DN_MAX_OFFSET_SHIFT	64	/* 2^64 bytes in a dnode */
 
 /*
  * Derived constants.
  */
 #define	DNODE_SIZE	(1 << DNODE_SHIFT)
 #define	DN_MAX_NBLKPTR	((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
 #define	DN_MAX_BONUSLEN	(DNODE_SIZE - DNODE_CORE_SIZE - (1 << SPA_BLKPTRSHIFT))
 #define	DN_MAX_OBJECT	(1ULL << DN_MAX_OBJECT_SHIFT)
 
 #define	DNODES_PER_BLOCK_SHIFT	(DNODE_BLOCK_SHIFT - DNODE_SHIFT)
 #define	DNODES_PER_BLOCK	(1ULL << DNODES_PER_BLOCK_SHIFT)
 #define	DNODES_PER_LEVEL_SHIFT	(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
 
 /* The +2 here is a cheesy way to round up */
 #define	DN_MAX_LEVELS	(2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
 	(DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))
 
 #define	DN_BONUS(dnp)	((void*)((dnp)->dn_bonus + \
 	(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
 
 #define	DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
 	(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
 
 #define	EPB(blkshift, typeshift)	(1 << (blkshift - typeshift))
 
 /* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
 #define	DNODE_FLAG_USED_BYTES		(1<<0)
 #define	DNODE_FLAG_USERUSED_ACCOUNTED	(1<<1)
 
 /* Does dnode have a SA spill blkptr in bonus? */
 #define	DNODE_FLAG_SPILL_BLKPTR	(1<<2)
 
 typedef struct dnode_phys {
 	uint8_t dn_type;		/* dmu_object_type_t */
 	uint8_t dn_indblkshift;		/* ln2(indirect block size) */
 	uint8_t dn_nlevels;		/* 1=dn_blkptr->data blocks */
 	uint8_t dn_nblkptr;		/* length of dn_blkptr */
 	uint8_t dn_bonustype;		/* type of data in bonus buffer */
 	uint8_t	dn_checksum;		/* ZIO_CHECKSUM type */
 	uint8_t	dn_compress;		/* ZIO_COMPRESS type */
 	uint8_t dn_flags;		/* DNODE_FLAG_* */
 	uint16_t dn_datablkszsec;	/* data block size in 512b sectors */
 	uint16_t dn_bonuslen;		/* length of dn_bonus */
 	uint8_t dn_pad2[4];
 
 	/* accounting is protected by dn_dirty_mtx */
 	uint64_t dn_maxblkid;		/* largest allocated block ID */
 	uint64_t dn_used;		/* bytes (or sectors) of disk space */
 
 	uint64_t dn_pad3[4];
 
 	blkptr_t dn_blkptr[1];
 	uint8_t dn_bonus[DN_MAX_BONUSLEN - sizeof (blkptr_t)];
 	blkptr_t dn_spill;
 } dnode_phys_t;
 
 typedef enum dmu_object_byteswap {
 	DMU_BSWAP_UINT8,
 	DMU_BSWAP_UINT16,
 	DMU_BSWAP_UINT32,
 	DMU_BSWAP_UINT64,
 	DMU_BSWAP_ZAP,
 	DMU_BSWAP_DNODE,
 	DMU_BSWAP_OBJSET,
 	DMU_BSWAP_ZNODE,
 	DMU_BSWAP_OLDACL,
 	DMU_BSWAP_ACL,
 	/*
 	 * Allocating a new byteswap type number makes the on-disk format
 	 * incompatible with any other format that uses the same number.
 	 *
 	 * Data can usually be structured to work with one of the
 	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
 	 */
 	DMU_BSWAP_NUMFUNCS
 } dmu_object_byteswap_t;
 
 #define	DMU_OT_NEWTYPE 0x80
 #define	DMU_OT_METADATA 0x40
 #define	DMU_OT_BYTESWAP_MASK 0x3f
 
 /*
  * Defines a uint8_t object type. Object types specify if the data
  * in the object is metadata (boolean) and how to byteswap the data
  * (dmu_object_byteswap_t).
  */
 #define	DMU_OT(byteswap, metadata) \
 	(DMU_OT_NEWTYPE | \
 	((metadata) ? DMU_OT_METADATA : 0) | \
 	((byteswap) & DMU_OT_BYTESWAP_MASK))
 
 typedef enum dmu_object_type {
 	DMU_OT_NONE,
 	/* general: */
 	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
 	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
 	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
 	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
 	DMU_OT_BPLIST,			/* UINT64 */
 	DMU_OT_BPLIST_HDR,		/* UINT64 */
 	/* spa: */
 	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
 	DMU_OT_SPACE_MAP,		/* UINT64 */
 	/* zil: */
 	DMU_OT_INTENT_LOG,		/* UINT64 */
 	/* dmu: */
 	DMU_OT_DNODE,			/* DNODE */
 	DMU_OT_OBJSET,			/* OBJSET */
 	/* dsl: */
 	DMU_OT_DSL_DIR,			/* UINT64 */
 	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
 	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
 	DMU_OT_DSL_PROPS,		/* ZAP */
 	DMU_OT_DSL_DATASET,		/* UINT64 */
 	/* zpl: */
 	DMU_OT_ZNODE,			/* ZNODE */
 	DMU_OT_OLDACL,			/* Old ACL */
 	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
 	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
 	DMU_OT_MASTER_NODE,		/* ZAP */
 	DMU_OT_UNLINKED_SET,		/* ZAP */
 	/* zvol: */
 	DMU_OT_ZVOL,			/* UINT8 */
 	DMU_OT_ZVOL_PROP,		/* ZAP */
 	/* other; for testing only! */
 	DMU_OT_PLAIN_OTHER,		/* UINT8 */
 	DMU_OT_UINT64_OTHER,		/* UINT64 */
 	DMU_OT_ZAP_OTHER,		/* ZAP */
 	/* new object types: */
 	DMU_OT_ERROR_LOG,		/* ZAP */
 	DMU_OT_SPA_HISTORY,		/* UINT8 */
 	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
 	DMU_OT_POOL_PROPS,		/* ZAP */
 	DMU_OT_DSL_PERMS,		/* ZAP */
 	DMU_OT_ACL,			/* ACL */
 	DMU_OT_SYSACL,			/* SYSACL */
 	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
 	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
 	DMU_OT_NEXT_CLONES,		/* ZAP */
 	DMU_OT_SCAN_QUEUE,		/* ZAP */
 	DMU_OT_USERGROUP_USED,		/* ZAP */
 	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
 	DMU_OT_USERREFS,		/* ZAP */
 	DMU_OT_DDT_ZAP,			/* ZAP */
 	DMU_OT_DDT_STATS,		/* ZAP */
 	DMU_OT_SA,			/* System attr */
 	DMU_OT_SA_MASTER_NODE,		/* ZAP */
 	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
 	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
 	DMU_OT_SCAN_XLATE,		/* ZAP */
 	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
 	DMU_OT_NUMTYPES,
 
 	/*
 	 * Names for valid types declared with DMU_OT().
 	 */
 	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
 	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
 	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
 	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
 	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
 	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
 	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
 	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
 	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
 	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE)
 } dmu_object_type_t;
 
 typedef enum dmu_objset_type {
 	DMU_OST_NONE,
 	DMU_OST_META,
 	DMU_OST_ZFS,
 	DMU_OST_ZVOL,
 	DMU_OST_OTHER,			/* For testing only! */
 	DMU_OST_ANY,			/* Be careful! */
 	DMU_OST_NUMTYPES
 } dmu_objset_type_t;
 
 /*
  * header for all bonus and spill buffers.
  * The header has a fixed portion with a variable number
  * of "lengths" depending on the number of variable sized
  * attribues which are determined by the "layout number"
  */
 
 #define	SA_MAGIC	0x2F505A  /* ZFS SA */
 typedef struct sa_hdr_phys {
 	uint32_t sa_magic;
 	uint16_t sa_layout_info;  /* Encoded with hdrsize and layout number */
 	uint16_t sa_lengths[1];	/* optional sizes for variable length attrs */
 	/* ... Data follows the lengths.  */
 } sa_hdr_phys_t;
 
 /*
  * sa_hdr_phys -> sa_layout_info
  *
  * 16      10       0
  * +--------+-------+
  * | hdrsz  |layout |
  * +--------+-------+
  *
  * Bits 0-10 are the layout number
  * Bits 11-16 are the size of the header.
  * The hdrsize is the number * 8
  *
  * For example.
  * hdrsz of 1 ==> 8 byte header
  *          2 ==> 16 byte header
  *
  */
 
 #define	SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
 #define	SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0)
 #define	SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
 { \
 	BF32_SET_SB(x, 10, 6, 3, 0, size); \
 	BF32_SET(x, 0, 10, num); \
 }
 
 #define	SA_MODE_OFFSET		0
 #define	SA_SIZE_OFFSET		8
 #define	SA_GEN_OFFSET		16
 #define	SA_UID_OFFSET		24
 #define	SA_GID_OFFSET		32
 #define	SA_PARENT_OFFSET	40
 
 /*
  * Intent log header - this on disk structure holds fields to manage
  * the log.  All fields are 64 bit to easily handle cross architectures.
  */
 typedef struct zil_header {
 	uint64_t zh_claim_txg;	/* txg in which log blocks were claimed */
 	uint64_t zh_replay_seq;	/* highest replayed sequence number */
 	blkptr_t zh_log;	/* log chain */
 	uint64_t zh_claim_seq;	/* highest claimed sequence number */
 	uint64_t zh_pad[5];
 } zil_header_t;
 
 #define	OBJSET_PHYS_SIZE 2048
 
 typedef struct objset_phys {
 	dnode_phys_t os_meta_dnode;
 	zil_header_t os_zil_header;
 	uint64_t os_type;
 	uint64_t os_flags;
 	char os_pad[OBJSET_PHYS_SIZE - sizeof (dnode_phys_t)*3 -
 	    sizeof (zil_header_t) - sizeof (uint64_t)*2];
 	dnode_phys_t os_userused_dnode;
 	dnode_phys_t os_groupused_dnode;
 } objset_phys_t;
 
 typedef struct dsl_dir_phys {
 	uint64_t dd_creation_time; /* not actually used */
 	uint64_t dd_head_dataset_obj;
 	uint64_t dd_parent_obj;
 	uint64_t dd_clone_parent_obj;
 	uint64_t dd_child_dir_zapobj;
 	/*
 	 * how much space our children are accounting for; for leaf
 	 * datasets, == physical space used by fs + snaps
 	 */
 	uint64_t dd_used_bytes;
 	uint64_t dd_compressed_bytes;
 	uint64_t dd_uncompressed_bytes;
 	/* Administrative quota setting */
 	uint64_t dd_quota;
 	/* Administrative reservation setting */
 	uint64_t dd_reserved;
 	uint64_t dd_props_zapobj;
 	uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */
 } dsl_dir_phys_t;
 
 typedef struct dsl_dataset_phys {
 	uint64_t ds_dir_obj;
 	uint64_t ds_prev_snap_obj;
 	uint64_t ds_prev_snap_txg;
 	uint64_t ds_next_snap_obj;
 	uint64_t ds_snapnames_zapobj;	/* zap obj of snaps; ==0 for snaps */
 	uint64_t ds_num_children;	/* clone/snap children; ==0 for head */
 	uint64_t ds_creation_time;	/* seconds since 1970 */
 	uint64_t ds_creation_txg;
 	uint64_t ds_deadlist_obj;
 	uint64_t ds_used_bytes;
 	uint64_t ds_compressed_bytes;
 	uint64_t ds_uncompressed_bytes;
 	uint64_t ds_unique_bytes;	/* only relevant to snapshots */
 	/*
 	 * The ds_fsid_guid is a 56-bit ID that can change to avoid
 	 * collisions.  The ds_guid is a 64-bit ID that will never
 	 * change, so there is a small probability that it will collide.
 	 */
 	uint64_t ds_fsid_guid;
 	uint64_t ds_guid;
 	uint64_t ds_flags;
 	blkptr_t ds_bp;
 	uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */
 } dsl_dataset_phys_t;
 
 /*
  * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
  */
 #define	DMU_POOL_DIRECTORY_OBJECT	1
 #define	DMU_POOL_CONFIG			"config"
 #define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
 #define	DMU_POOL_ROOT_DATASET		"root_dataset"
 #define	DMU_POOL_SYNC_BPLIST		"sync_bplist"
 #define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
 #define	DMU_POOL_ERRLOG_LAST		"errlog_last"
 #define	DMU_POOL_SPARES			"spares"
 #define	DMU_POOL_DEFLATE		"deflate"
 #define	DMU_POOL_HISTORY		"history"
 #define	DMU_POOL_PROPS			"pool_props"
+#define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
 
 #define	ZAP_MAGIC 0x2F52AB2ABULL
 
 #define	FZAP_BLOCK_SHIFT(zap)	((zap)->zap_block_shift)
 
 #define	ZAP_MAXCD		(uint32_t)(-1)
 #define	ZAP_HASHBITS		28
 #define	MZAP_ENT_LEN		64
 #define	MZAP_NAME_LEN		(MZAP_ENT_LEN - 8 - 4 - 2)
 #define	MZAP_MAX_BLKSHIFT	SPA_MAXBLOCKSHIFT
 #define	MZAP_MAX_BLKSZ		(1 << MZAP_MAX_BLKSHIFT)
 
 typedef struct mzap_ent_phys {
 	uint64_t mze_value;
 	uint32_t mze_cd;
 	uint16_t mze_pad;	/* in case we want to chain them someday */
 	char mze_name[MZAP_NAME_LEN];
 } mzap_ent_phys_t;
 
 typedef struct mzap_phys {
 	uint64_t mz_block_type;	/* ZBT_MICRO */
 	uint64_t mz_salt;
 	uint64_t mz_pad[6];
 	mzap_ent_phys_t mz_chunk[1];
 	/* actually variable size depending on block size */
 } mzap_phys_t;
 
 /*
  * The (fat) zap is stored in one object. It is an array of
  * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
  *
  * ptrtbl fits in first block:
  * 	[zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
  *
  * ptrtbl too big for first block:
  * 	[zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
  *
  */
 
 #define	ZBT_LEAF		((1ULL << 63) + 0)
 #define	ZBT_HEADER		((1ULL << 63) + 1)
 #define	ZBT_MICRO		((1ULL << 63) + 3)
 /* any other values are ptrtbl blocks */
 
 /*
  * the embedded pointer table takes up half a block:
  * block size / entry size (2^3) / 2
  */
 #define	ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)
 
 /*
  * The embedded pointer table starts half-way through the block.  Since
  * the pointer table itself is half the block, it starts at (64-bit)
  * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
  */
 #define	ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
 	((uint64_t *)(zap)->zap_phys) \
 	[(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]
 
 /*
  * TAKE NOTE:
  * If zap_phys_t is modified, zap_byteswap() must be modified.
  */
 typedef struct zap_phys {
 	uint64_t zap_block_type;	/* ZBT_HEADER */
 	uint64_t zap_magic;		/* ZAP_MAGIC */
 
 	struct zap_table_phys {
 		uint64_t zt_blk;	/* starting block number */
 		uint64_t zt_numblks;	/* number of blocks */
 		uint64_t zt_shift;	/* bits to index it */
 		uint64_t zt_nextblk;	/* next (larger) copy start block */
 		uint64_t zt_blks_copied; /* number source blocks copied */
 	} zap_ptrtbl;
 
 	uint64_t zap_freeblk;		/* the next free block */
 	uint64_t zap_num_leafs;		/* number of leafs */
 	uint64_t zap_num_entries;	/* number of entries */
 	uint64_t zap_salt;		/* salt to stir into hash function */
 	/*
 	 * This structure is followed by padding, and then the embedded
 	 * pointer table.  The embedded pointer table takes up second
 	 * half of the block.  It is accessed using the
 	 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
 	 */
 } zap_phys_t;
 
 typedef struct zap_table_phys zap_table_phys_t;
 
 typedef struct fat_zap {
 	int zap_block_shift;			/* block size shift */
 	zap_phys_t *zap_phys;
 } fat_zap_t;
 
 #define	ZAP_LEAF_MAGIC 0x2AB1EAF
 
 /* chunk size = 24 bytes */
 #define	ZAP_LEAF_CHUNKSIZE 24
 
 /*
  * The amount of space available for chunks is:
  * block size (1<<l->l_bs) - hash entry size (2) * number of hash
  * entries - header space (2*chunksize)
  */
 #define	ZAP_LEAF_NUMCHUNKS(l) \
 	(((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
 	ZAP_LEAF_CHUNKSIZE - 2)
 
 /*
  * The amount of space within the chunk available for the array is:
  * chunk size - space for type (1) - space for next pointer (2)
  */
 #define	ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
 
 #define	ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
 	(((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)
 
 /*
  * Low water mark:  when there are only this many chunks free, start
  * growing the ptrtbl.  Ideally, this should be larger than a
  * "reasonably-sized" entry.  20 chunks is more than enough for the
  * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
  * while still being only around 3% for 16k blocks.
  */
 #define	ZAP_LEAF_LOW_WATER (20)
 
 /*
  * The leaf hash table has block size / 2^5 (32) number of entries,
  * which should be more than enough for the maximum number of entries,
  * which is less than block size / CHUNKSIZE (24) / minimum number of
  * chunks per entry (3).
  */
 #define	ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
 #define	ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))
 
 /*
  * The chunks start immediately after the hash table.  The end of the
  * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
  * chunk_t.
  */
 #define	ZAP_LEAF_CHUNK(l, idx) \
 	((zap_leaf_chunk_t *) \
 	((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
 #define	ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)
 
 typedef enum zap_chunk_type {
 	ZAP_CHUNK_FREE = 253,
 	ZAP_CHUNK_ENTRY = 252,
 	ZAP_CHUNK_ARRAY = 251,
 	ZAP_CHUNK_TYPE_MAX = 250
 } zap_chunk_type_t;
 
 /*
  * TAKE NOTE:
  * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
  */
 typedef struct zap_leaf_phys {
 	struct zap_leaf_header {
 		uint64_t lh_block_type;		/* ZBT_LEAF */
 		uint64_t lh_pad1;
 		uint64_t lh_prefix;		/* hash prefix of this leaf */
 		uint32_t lh_magic;		/* ZAP_LEAF_MAGIC */
 		uint16_t lh_nfree;		/* number free chunks */
 		uint16_t lh_nentries;		/* number of entries */
 		uint16_t lh_prefix_len;		/* num bits used to id this */
 
 /* above is accessable to zap, below is zap_leaf private */
 
 		uint16_t lh_freelist;		/* chunk head of free list */
 		uint8_t lh_pad2[12];
 	} l_hdr; /* 2 24-byte chunks */
 
 	/*
 	 * The header is followed by a hash table with
 	 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries.  The hash table is
 	 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
 	 * zap_leaf_chunk structures.  These structures are accessed
 	 * with the ZAP_LEAF_CHUNK() macro.
 	 */
 
 	uint16_t l_hash[1];
 } zap_leaf_phys_t;
 
 typedef union zap_leaf_chunk {
 	struct zap_leaf_entry {
 		uint8_t le_type; 		/* always ZAP_CHUNK_ENTRY */
 		uint8_t le_value_intlen;	/* size of ints */
 		uint16_t le_next;		/* next entry in hash chain */
 		uint16_t le_name_chunk;		/* first chunk of the name */
 		uint16_t le_name_numints;	/* bytes in name, incl null */
 		uint16_t le_value_chunk;	/* first chunk of the value */
 		uint16_t le_value_numints;	/* value length in ints */
 		uint32_t le_cd;			/* collision differentiator */
 		uint64_t le_hash;		/* hash value of the name */
 	} l_entry;
 	struct zap_leaf_array {
 		uint8_t la_type;		/* always ZAP_CHUNK_ARRAY */
 		uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
 		uint16_t la_next;		/* next blk or CHAIN_END */
 	} l_array;
 	struct zap_leaf_free {
 		uint8_t lf_type;		/* always ZAP_CHUNK_FREE */
 		uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
 		uint16_t lf_next;	/* next in free list, or CHAIN_END */
 	} l_free;
 } zap_leaf_chunk_t;
 
 typedef struct zap_leaf {
 	int l_bs;			/* block size shift */
 	zap_leaf_phys_t *l_phys;
 } zap_leaf_t;
 
 /*
  * Define special zfs pflags
  */
 #define	ZFS_XATTR	0x1		/* is an extended attribute */
 #define	ZFS_INHERIT_ACE	0x2		/* ace has inheritable ACEs */
 #define	ZFS_ACL_TRIVIAL 0x4		/* files ACL is trivial */
 
 #define	MASTER_NODE_OBJ	1
 
 /*
  * special attributes for master node.
  */
 
 #define	ZFS_FSID		"FSID"
 #define	ZFS_UNLINKED_SET	"DELETE_QUEUE"
 #define	ZFS_ROOT_OBJ		"ROOT"
 #define	ZPL_VERSION_OBJ		"VERSION"
 #define	ZFS_PROP_BLOCKPERPAGE	"BLOCKPERPAGE"
 #define	ZFS_PROP_NOGROWBLOCKS	"NOGROWBLOCKS"
 
 #define	ZFS_FLAG_BLOCKPERPAGE	0x1
 #define	ZFS_FLAG_NOGROWBLOCKS	0x2
 
 /*
  * ZPL version - rev'd whenever an incompatible on-disk format change
  * occurs.  Independent of SPA/DMU/ZAP versioning.
  */
 
 #define	ZPL_VERSION		1ULL
 
 /*
  * The directory entry has the type (currently unused on Solaris) in the
  * top 4 bits, and the object number in the low 48 bits.  The "middle"
  * 12 bits are unused.
  */
 #define	ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
 #define	ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
 #define	ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
 
 typedef struct ace {
 	uid_t		a_who;		/* uid or gid */
 	uint32_t	a_access_mask;	/* read,write,... */
 	uint16_t	a_flags;	/* see below */
 	uint16_t	a_type;		/* allow or deny */
 } ace_t;
 
 #define ACE_SLOT_CNT	6
 
 typedef struct zfs_znode_acl {
 	uint64_t	z_acl_extern_obj;	  /* ext acl pieces */
 	uint32_t	z_acl_count;		  /* Number of ACEs */
 	uint16_t	z_acl_version;		  /* acl version */
 	uint16_t	z_acl_pad;		  /* pad */
 	ace_t		z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
 } zfs_znode_acl_t;
 
 /*
  * This is the persistent portion of the znode.  It is stored
  * in the "bonus buffer" of the file.  Short symbolic links
  * are also stored in the bonus buffer.
  */
 typedef struct znode_phys {
 	uint64_t zp_atime[2];		/*  0 - last file access time */
 	uint64_t zp_mtime[2];		/* 16 - last file modification time */
 	uint64_t zp_ctime[2];		/* 32 - last file change time */
 	uint64_t zp_crtime[2];		/* 48 - creation time */
 	uint64_t zp_gen;		/* 64 - generation (txg of creation) */
 	uint64_t zp_mode;		/* 72 - file mode bits */
 	uint64_t zp_size;		/* 80 - size of file */
 	uint64_t zp_parent;		/* 88 - directory parent (`..') */
 	uint64_t zp_links;		/* 96 - number of links to file */
 	uint64_t zp_xattr;		/* 104 - DMU object for xattrs */
 	uint64_t zp_rdev;		/* 112 - dev_t for VBLK & VCHR files */
 	uint64_t zp_flags;		/* 120 - persistent flags */
 	uint64_t zp_uid;		/* 128 - file owner */
 	uint64_t zp_gid;		/* 136 - owning group */
 	uint64_t zp_pad[4];		/* 144 - future */
 	zfs_znode_acl_t zp_acl;		/* 176 - 263 ACL */
 	/*
 	 * Data may pad out any remaining bytes in the znode buffer, eg:
 	 *
 	 * |<---------------------- dnode_phys (512) ------------------------>|
 	 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
 	 *			|<---- znode (264) ---->|<---- data (56) ---->|
 	 *
 	 * At present, we only use this space to store symbolic links.
 	 */
 } znode_phys_t;
 
 /*
  * In-core vdev representation.
  */
 struct vdev;
+struct spa;
 typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
     off_t offset, void *buf, size_t bytes);
 typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
     void *buf, off_t offset, size_t bytes);
 
 typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;
 
 typedef struct vdev {
 	STAILQ_ENTRY(vdev) v_childlink;	/* link in parent's child list */
 	STAILQ_ENTRY(vdev) v_alllink;	/* link in global vdev list */
 	vdev_list_t	v_children;	/* children of this vdev */
 	const char	*v_name;	/* vdev name */
 	uint64_t	v_guid;		/* vdev guid */
 	int		v_id;		/* index in parent */
 	int		v_ashift;	/* offset to block shift */
 	int		v_nparity;	/* # parity for raidz */
 	struct vdev	*v_top;		/* parent vdev */
 	int		v_nchildren;	/* # children */
 	vdev_state_t	v_state;	/* current state */
 	vdev_phys_read_t *v_phys_read;	/* read from raw leaf vdev */
 	vdev_read_t	*v_read;	/* read from vdev */
 	void		*v_read_priv;	/* private data for read function */
+	struct spa	*spa;		/* link to spa */
 } vdev_t;
 
 /*
  * In-core pool representation.
  */
 typedef STAILQ_HEAD(spa_list, spa) spa_list_t;
 
 typedef struct spa {
 	STAILQ_ENTRY(spa) spa_link;	/* link in global pool list */
 	char		*spa_name;	/* pool name */
 	uint64_t	spa_guid;	/* pool guid */
 	uint64_t	spa_txg;	/* most recent transaction */
 	struct uberblock spa_uberblock;	/* best uberblock so far */
 	vdev_list_t	spa_vdevs;	/* list of all toplevel vdevs */
 	objset_phys_t	spa_mos;	/* MOS for this pool */
+	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
+	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
 	int		spa_inited;	/* initialized */
 } spa_t;
 
 static void decode_embedded_bp_compressed(const blkptr_t *, void *);
Index: head/sys/cddl/boot/zfs/zfssubr.c
===================================================================
--- head/sys/cddl/boot/zfs/zfssubr.c	(revision 304320)
+++ head/sys/cddl/boot/zfs/zfssubr.c	(revision 304321)
@@ -1,1703 +1,1784 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
  * Common Development and Distribution License (the "License").
  * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.
  *
  * When distributing Covered Code, include this CDDL HEADER in each
  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  * If applicable, add the following below this CDDL HEADER, with the
  * fields enclosed by brackets "[]" replaced with your own identifying
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 static uint64_t zfs_crc64_table[256];
 
 #define	ECKSUM	666
 
 #define	ASSERT3S(x, y, z)	((void)0)
 #define	ASSERT3U(x, y, z)	((void)0)
 #define	ASSERT3P(x, y, z)	((void)0)
 #define	ASSERT0(x)		((void)0)
 #define	ASSERT(x)		((void)0)
 
 #define	panic(...)	do {						\
 	printf(__VA_ARGS__);						\
 	for (;;) ;							\
 } while (0)
 
 #define	kmem_alloc(size, flag)	zfs_alloc((size))
 #define	kmem_free(ptr, size)	zfs_free((ptr), (size))
 
 static void
 zfs_init_crc(void)
 {
 	int i, j;
 	uint64_t *ct;
 
 	/*
 	 * Calculate the crc64 table (used for the zap hash
 	 * function).
 	 */
 	if (zfs_crc64_table[128] != ZFS_CRC64_POLY) {
 		memset(zfs_crc64_table, 0, sizeof(zfs_crc64_table));
 		for (i = 0; i < 256; i++)
 			for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
 				*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
 	}
 }
 
 static void
-zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
+zio_checksum_off(const void *buf, uint64_t size,
+    const void *ctx_template, zio_cksum_t *zcp)
 {
 	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
 }
 
 /*
  * Signature for checksum functions.
  */
-typedef void zio_checksum_t(const void *data, uint64_t size, zio_cksum_t *zcp);
+typedef void zio_checksum_t(const void *data, uint64_t size,
+    const void *ctx_template, zio_cksum_t *zcp);
+typedef void *zio_checksum_tmpl_init_t(const zio_cksum_salt_t *salt);
+typedef void zio_checksum_tmpl_free_t(void *ctx_template);
 
+typedef enum zio_checksum_flags {
+	/* Strong enough for metadata? */
+	ZCHECKSUM_FLAG_METADATA = (1 << 1),
+	/* ZIO embedded checksum */
+	ZCHECKSUM_FLAG_EMBEDDED = (1 << 2),
+	/* Strong enough for dedup (without verification)? */
+	ZCHECKSUM_FLAG_DEDUP = (1 << 3),
+	/* Uses salt value */
+	ZCHECKSUM_FLAG_SALTED = (1 << 4),
+	/* Strong enough for nopwrite? */
+	ZCHECKSUM_FLAG_NOPWRITE = (1 << 5)
+} zio_checksum_flags_t;
+
 /*
  * Information about each checksum function.
  */
 typedef struct zio_checksum_info {
-	zio_checksum_t	*ci_func[2]; /* checksum function for each byteorder */
-	int		ci_correctable;	/* number of correctable bits	*/
-	int		ci_eck;		/* uses zio embedded checksum? */
-	int		ci_dedup;	/* strong enough for dedup? */
-	const char	*ci_name;	/* descriptive name */
+	/* checksum function for each byteorder */
+	zio_checksum_t			*ci_func[2];
+	zio_checksum_tmpl_init_t	*ci_tmpl_init;
+	zio_checksum_tmpl_free_t	*ci_tmpl_free;
+	zio_checksum_flags_t		ci_flags;
+	const char			*ci_name;	/* descriptive name */
 } zio_checksum_info_t;
 
 #include "blkptr.c"
 
 #include "fletcher.c"
 #include "sha256.c"
+#include "skein_zfs.c"
 
 static zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
-	{{NULL,			NULL},			0, 0, 0, "inherit"},
-	{{NULL,			NULL},			0, 0, 0, "on"},
-	{{zio_checksum_off,	zio_checksum_off},	0, 0, 0, "off"},
-	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1, 0, "label"},
-	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 1, 0, "gang_header"},
-	{{fletcher_2_native,	fletcher_2_byteswap},	0, 1, 0, "zilog"},
-	{{fletcher_2_native,	fletcher_2_byteswap},	0, 0, 0, "fletcher2"},
-	{{fletcher_4_native,	fletcher_4_byteswap},	1, 0, 0, "fletcher4"},
-	{{zio_checksum_SHA256,	zio_checksum_SHA256},	1, 0, 1, "SHA256"},
-	{{fletcher_4_native,	fletcher_4_byteswap},	0, 1, 0, "zillog2"},
+	{{NULL, NULL}, NULL, NULL, 0, "inherit"},
+	{{NULL, NULL}, NULL, NULL, 0, "on"},
+	{{zio_checksum_off,	zio_checksum_off}, NULL, NULL, 0, "off"},
+	{{zio_checksum_SHA256,	zio_checksum_SHA256}, NULL, NULL,
+	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED, "label"},
+	{{zio_checksum_SHA256,	zio_checksum_SHA256}, NULL, NULL,
+	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED, "gang_header"},
+	{{fletcher_2_native,	fletcher_2_byteswap}, NULL, NULL,
+	    ZCHECKSUM_FLAG_EMBEDDED, "zilog"},
+	{{fletcher_2_native,	fletcher_2_byteswap}, NULL, NULL,
+	    0, "fletcher2"},
+	{{fletcher_4_native,	fletcher_4_byteswap}, NULL, NULL,
+	    ZCHECKSUM_FLAG_METADATA, "fletcher4"},
+	{{zio_checksum_SHA256,	zio_checksum_SHA256}, NULL, NULL,
+	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
+	    ZCHECKSUM_FLAG_NOPWRITE, "SHA256"},
+	{{fletcher_4_native,	fletcher_4_byteswap}, NULL, NULL,
+	    ZCHECKSUM_FLAG_EMBEDDED, "zillog2"},
+	{{zio_checksum_off,	zio_checksum_off}, NULL, NULL,
+	    0, "noparity"},
+	{{zio_checksum_SHA512_native,	zio_checksum_SHA512_byteswap},
+	    NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
+	    ZCHECKSUM_FLAG_NOPWRITE, "SHA512"},
+	{{zio_checksum_skein_native, zio_checksum_skein_byteswap},
+	    zio_checksum_skein_tmpl_init, zio_checksum_skein_tmpl_free,
+	    ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
+	    ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "skein"},
+	/* no edonr for now */
+	{{NULL, NULL}, NULL, NULL, ZCHECKSUM_FLAG_METADATA |
+	    ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "edonr"}
 };
 
-
 /*
  * Common signature for all zio compress/decompress functions.
  */
 typedef size_t zio_compress_func_t(void *src, void *dst,
     size_t s_len, size_t d_len, int);
 typedef int zio_decompress_func_t(void *src, void *dst,
     size_t s_len, size_t d_len, int);
 
 /*
  * Information about each compression function.
  */
 typedef struct zio_compress_info {
 	zio_compress_func_t	*ci_compress;	/* compression function */
 	zio_decompress_func_t	*ci_decompress;	/* decompression function */
 	int			ci_level;	/* level parameter */
 	const char		*ci_name;	/* algorithm name */
 } zio_compress_info_t;
 
 #include "lzjb.c"
 #include "zle.c"
 #include "lz4.c"
 
 /*
  * Compression vectors.
  */
 static zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
 	{NULL,			NULL,			0,	"inherit"},
 	{NULL,			NULL,			0,	"on"},
 	{NULL,			NULL,			0,	"uncompressed"},
 	{NULL,			lzjb_decompress,	0,	"lzjb"},
 	{NULL,			NULL,			0,	"empty"},
 	{NULL,			NULL,			1,	"gzip-1"},
 	{NULL,			NULL,			2,	"gzip-2"},
 	{NULL,			NULL,			3,	"gzip-3"},
 	{NULL,			NULL,			4,	"gzip-4"},
 	{NULL,			NULL,			5,	"gzip-5"},
 	{NULL,			NULL,			6,	"gzip-6"},
 	{NULL,			NULL,			7,	"gzip-7"},
 	{NULL,			NULL,			8,	"gzip-8"},
 	{NULL,			NULL,			9,	"gzip-9"},
 	{NULL,			zle_decompress,		64,	"zle"},
 	{NULL,			lz4_decompress,		0,	"lz4"},
 };
 
 static void
 byteswap_uint64_array(void *vbuf, size_t size)
 {
 	uint64_t *buf = vbuf;
 	size_t count = size >> 3;
 	int i;
 
 	ASSERT((size & 7) == 0);
 
 	for (i = 0; i < count; i++)
 		buf[i] = BSWAP_64(buf[i]);
 }
 
 /*
  * Set the external verifier for a gang block based on <vdev, offset, txg>,
  * a tuple which is guaranteed to be unique for the life of the pool.
  */
 static void
 zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
 {
 	const dva_t *dva = BP_IDENTITY(bp);
 	uint64_t txg = BP_PHYSICAL_BIRTH(bp);
 
 	ASSERT(BP_IS_GANG(bp));
 
 	ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
 }
 
 /*
  * Set the external verifier for a label block based on its offset.
  * The vdev is implicit, and the txg is unknowable at pool open time --
  * hence the logic in vdev_uberblock_load() to find the most recent copy.
  */
 static void
 zio_checksum_label_verifier(zio_cksum_t *zcp, uint64_t offset)
 {
 	ZIO_SET_CHECKSUM(zcp, offset, 0, 0, 0);
 }
 
+/*
+ * Calls the template init function of a checksum which supports context
+ * templates and installs the template into the spa_t.
+ */
+static void
+zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
+{
+	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
+
+	if (ci->ci_tmpl_init == NULL)
+		return;
+
+	/* Already initialized for this pool: nothing to do. */
+	if (spa->spa_cksum_tmpls[checksum] != NULL)
+		return;
+
+	/* Create and cache the context template for this checksum. */
+	spa->spa_cksum_tmpls[checksum] =
+	    ci->ci_tmpl_init(&spa->spa_cksum_salt);
+}
+
+/*
+ * Called by a spa_t that's about to be deallocated. This steps through
+ * all of the checksum context templates and deallocates any that were
+ * initialized using the algorithm-specific template init function.
+ */
+void
+zio_checksum_templates_free(spa_t *spa)
+{
+	for (enum zio_checksum checksum = 0;
+	    checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
+		if (spa->spa_cksum_tmpls[checksum] != NULL) {
+			zio_checksum_info_t *ci = &zio_checksum_table[checksum];
+
+			ci->ci_tmpl_free(spa->spa_cksum_tmpls[checksum]);
+			spa->spa_cksum_tmpls[checksum] = NULL;
+		}
+	}
+}
+
 static int
-zio_checksum_verify(const blkptr_t *bp, void *data)
+zio_checksum_verify(const spa_t *spa, const blkptr_t *bp, void *data)
 {
 	uint64_t size;
 	unsigned int checksum;
 	zio_checksum_info_t *ci;
 	zio_cksum_t actual_cksum, expected_cksum, verifier;
 	int byteswap;
 
 	checksum = BP_GET_CHECKSUM(bp);
 	size = BP_GET_PSIZE(bp);
 
 	if (checksum >= ZIO_CHECKSUM_FUNCTIONS)
 		return (EINVAL);
 	ci = &zio_checksum_table[checksum];
 	if (ci->ci_func[0] == NULL || ci->ci_func[1] == NULL)
 		return (EINVAL);
 
-	if (ci->ci_eck) {
+	zio_checksum_template_init(checksum, __DECONST(spa_t *, spa));
+	if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
 		zio_eck_t *eck;
 
 		ASSERT(checksum == ZIO_CHECKSUM_GANG_HEADER ||
 		    checksum == ZIO_CHECKSUM_LABEL);
 
 		eck = (zio_eck_t *)((char *)data + size) - 1;
 
 		if (checksum == ZIO_CHECKSUM_GANG_HEADER)
 			zio_checksum_gang_verifier(&verifier, bp);
 		else if (checksum == ZIO_CHECKSUM_LABEL)
 			zio_checksum_label_verifier(&verifier,
 			    DVA_GET_OFFSET(BP_IDENTITY(bp)));
 		else
 			verifier = bp->blk_cksum;
 
 		byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
 
 		if (byteswap)
 			byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
 
 		expected_cksum = eck->zec_cksum;
 		eck->zec_cksum = verifier;
-		ci->ci_func[byteswap](data, size, &actual_cksum);
+		ci->ci_func[byteswap](data, size,
+		    spa->spa_cksum_tmpls[checksum], &actual_cksum);
 		eck->zec_cksum = expected_cksum;
 
 		if (byteswap)
 			byteswap_uint64_array(&expected_cksum,
 			    sizeof (zio_cksum_t));
 	} else {
 		expected_cksum = bp->blk_cksum;
-		ci->ci_func[0](data, size, &actual_cksum);
+		ci->ci_func[0](data, size, spa->spa_cksum_tmpls[checksum],
+		    &actual_cksum);
 	}
 
 	if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum)) {
-		/*printf("ZFS: read checksum failed\n");*/
+		/*printf("ZFS: read checksum %s failed\n", ci->ci_name);*/
 		return (EIO);
 	}
 
 	return (0);
 }
 
 static int
 zio_decompress_data(int cpfunc, void *src, uint64_t srcsize,
 	void *dest, uint64_t destsize)
 {
 	zio_compress_info_t *ci;
 
 	if (cpfunc >= ZIO_COMPRESS_FUNCTIONS) {
 		printf("ZFS: unsupported compression algorithm %u\n", cpfunc);
 		return (EIO);
 	}
 
 	ci = &zio_compress_table[cpfunc];
 	if (!ci->ci_decompress) {
 		printf("ZFS: unsupported compression algorithm %s\n",
 		    ci->ci_name);
 		return (EIO);
 	}
 
 	return (ci->ci_decompress(src, dest, srcsize, destsize, ci->ci_level));
 }
 
 static uint64_t
 zap_hash(uint64_t salt, const char *name)
 {
 	const uint8_t *cp;
 	uint8_t c;
 	uint64_t crc = salt;
 
 	ASSERT(crc != 0);
 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ c) & 0xFF];
 
 	/*
 	 * Only use 28 bits, since we need 4 bits in the cookie for the
 	 * collision differentiator.  We MUST use the high bits, since
 	 * those are the onces that we first pay attention to when
 	 * chosing the bucket.
 	 */
 	crc &= ~((1ULL << (64 - ZAP_HASHBITS)) - 1);
 
 	return (crc);
 }
 
 static void *zfs_alloc(size_t size);
 static void zfs_free(void *ptr, size_t size);
 
 typedef struct raidz_col {
 	uint64_t rc_devidx;		/* child device index for I/O */
 	uint64_t rc_offset;		/* device offset */
 	uint64_t rc_size;		/* I/O size */
 	void *rc_data;			/* I/O data */
 	int rc_error;			/* I/O error for this device */
 	uint8_t rc_tried;		/* Did we attempt this I/O column? */
 	uint8_t rc_skipped;		/* Did we skip this I/O column? */
 } raidz_col_t;
 
 typedef struct raidz_map {
 	uint64_t rm_cols;		/* Regular column count */
 	uint64_t rm_scols;		/* Count including skipped columns */
 	uint64_t rm_bigcols;		/* Number of oversized columns */
 	uint64_t rm_asize;		/* Actual total I/O size */
 	uint64_t rm_missingdata;	/* Count of missing data devices */
 	uint64_t rm_missingparity;	/* Count of missing parity devices */
 	uint64_t rm_firstdatacol;	/* First data column/parity count */
 	uint64_t rm_nskip;		/* Skipped sectors for padding */
 	uint64_t rm_skipstart;		/* Column index of padding start */
 	uintptr_t rm_reports;		/* # of referencing checksum reports */
 	uint8_t	rm_freed;		/* map no longer has referencing ZIO */
 	uint8_t	rm_ecksuminjected;	/* checksum error was injected */
 	raidz_col_t rm_col[1];		/* Flexible array of I/O columns */
 } raidz_map_t;
 
 #define	VDEV_RAIDZ_P		0
 #define	VDEV_RAIDZ_Q		1
 #define	VDEV_RAIDZ_R		2
 
 #define	VDEV_RAIDZ_MUL_2(x)	(((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
 #define	VDEV_RAIDZ_MUL_4(x)	(VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
 
 /*
  * We provide a mechanism to perform the field multiplication operation on a
  * 64-bit value all at once rather than a byte at a time. This works by
  * creating a mask from the top bit in each byte and using that to
  * conditionally apply the XOR of 0x1d.
  */
 #define	VDEV_RAIDZ_64MUL_2(x, mask) \
 { \
 	(mask) = (x) & 0x8080808080808080ULL; \
 	(mask) = ((mask) << 1) - ((mask) >> 7); \
 	(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
 	    ((mask) & 0x1d1d1d1d1d1d1d1dULL); \
 }
 
 #define	VDEV_RAIDZ_64MUL_4(x, mask) \
 { \
 	VDEV_RAIDZ_64MUL_2((x), mask); \
 	VDEV_RAIDZ_64MUL_2((x), mask); \
 }
 
 /*
  * These two tables represent powers and logs of 2 in the Galois field defined
  * above. These values were computed by repeatedly multiplying by 2 as above.
  */
 static const uint8_t vdev_raidz_pow2[256] = {
 	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
 	0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26,
 	0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9,
 	0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0,
 	0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35,
 	0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23,
 	0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0,
 	0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1,
 	0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc,
 	0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0,
 	0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f,
 	0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2,
 	0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88,
 	0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce,
 	0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93,
 	0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc,
 	0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9,
 	0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54,
 	0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa,
 	0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73,
 	0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e,
 	0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff,
 	0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4,
 	0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41,
 	0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e,
 	0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6,
 	0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef,
 	0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09,
 	0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5,
 	0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16,
 	0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83,
 	0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01
 };
 static const uint8_t vdev_raidz_log2[256] = {
 	0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6,
 	0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b,
 	0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81,
 	0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71,
 	0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21,
 	0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45,
 	0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9,
 	0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6,
 	0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd,
 	0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88,
 	0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd,
 	0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40,
 	0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e,
 	0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d,
 	0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b,
 	0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57,
 	0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d,
 	0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18,
 	0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c,
 	0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e,
 	0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd,
 	0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61,
 	0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e,
 	0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2,
 	0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76,
 	0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6,
 	0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa,
 	0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a,
 	0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51,
 	0x0b, 0xf5, 0x16, 0xeb, 0x7a, 0x75, 0x2c, 0xd7,
 	0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8,
 	0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf,
 };
 
 /*
  * Multiply a given number by 2 raised to the given power.
  */
 static uint8_t
 vdev_raidz_exp2(uint8_t a, int exp)
 {
 	if (a == 0)
 		return (0);
 
 	ASSERT(exp >= 0);
 	ASSERT(vdev_raidz_log2[a] > 0 || a == 1);
 
 	exp += vdev_raidz_log2[a];
 	if (exp > 255)
 		exp -= 255;
 
 	return (vdev_raidz_pow2[exp]);
 }
 
 static void
 vdev_raidz_generate_parity_p(raidz_map_t *rm)
 {
 	uint64_t *p, *src, pcount, ccount, i;
 	int c;
 
 	pcount = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0]);
 
 	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
 		src = rm->rm_col[c].rc_data;
 		p = rm->rm_col[VDEV_RAIDZ_P].rc_data;
 		ccount = rm->rm_col[c].rc_size / sizeof (src[0]);
 
 		if (c == rm->rm_firstdatacol) {
 			ASSERT(ccount == pcount);
 			for (i = 0; i < ccount; i++, src++, p++) {
 				*p = *src;
 			}
 		} else {
 			ASSERT(ccount <= pcount);
 			for (i = 0; i < ccount; i++, src++, p++) {
 				*p ^= *src;
 			}
 		}
 	}
 }
 
 static void
 vdev_raidz_generate_parity_pq(raidz_map_t *rm)
 {
 	uint64_t *p, *q, *src, pcnt, ccnt, mask, i;
 	int c;
 
 	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0]);
 	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
 	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);
 
 	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
 		src = rm->rm_col[c].rc_data;
 		p = rm->rm_col[VDEV_RAIDZ_P].rc_data;
 		q = rm->rm_col[VDEV_RAIDZ_Q].rc_data;
 
 		ccnt = rm->rm_col[c].rc_size / sizeof (src[0]);
 
 		if (c == rm->rm_firstdatacol) {
 			ASSERT(ccnt == pcnt || ccnt == 0);
 			for (i = 0; i < ccnt; i++, src++, p++, q++) {
 				*p = *src;
 				*q = *src;
 			}
 			for (; i < pcnt; i++, src++, p++, q++) {
 				*p = 0;
 				*q = 0;
 			}
 		} else {
 			ASSERT(ccnt <= pcnt);
 
 			/*
 			 * Apply the algorithm described above by multiplying
 			 * the previous result and adding in the new value.
 			 */
 			for (i = 0; i < ccnt; i++, src++, p++, q++) {
 				*p ^= *src;
 
 				VDEV_RAIDZ_64MUL_2(*q, mask);
 				*q ^= *src;
 			}
 
 			/*
 			 * Treat short columns as though they are full of 0s.
 			 * Note that there's therefore nothing needed for P.
 			 */
 			for (; i < pcnt; i++, q++) {
 				VDEV_RAIDZ_64MUL_2(*q, mask);
 			}
 		}
 	}
 }
 
 static void
 vdev_raidz_generate_parity_pqr(raidz_map_t *rm)
 {
 	uint64_t *p, *q, *r, *src, pcnt, ccnt, mask, i;
 	int c;
 
 	pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (src[0]);
 	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
 	    rm->rm_col[VDEV_RAIDZ_Q].rc_size);
 	ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
 	    rm->rm_col[VDEV_RAIDZ_R].rc_size);
 
 	for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
 		src = rm->rm_col[c].rc_data;
 		p = rm->rm_col[VDEV_RAIDZ_P].rc_data;
 		q = rm->rm_col[VDEV_RAIDZ_Q].rc_data;
 		r = rm->rm_col[VDEV_RAIDZ_R].rc_data;
 
 		ccnt = rm->rm_col[c].rc_size / sizeof (src[0]);
 
 		if (c == rm->rm_firstdatacol) {
 			ASSERT(ccnt == pcnt || ccnt == 0);
 			for (i = 0; i < ccnt; i++, src++, p++, q++, r++) {
 				*p = *src;
 				*q = *src;
 				*r = *src;
 			}
 			for (; i < pcnt; i++, src++, p++, q++, r++) {
 				*p = 0;
 				*q = 0;
 				*r = 0;
 			}
 		} else {
 			ASSERT(ccnt <= pcnt);
 
 			/*
 			 * Apply the algorithm described above by multiplying
 			 * the previous result and adding in the new value.
 			 */
 			for (i = 0; i < ccnt; i++, src++, p++, q++, r++) {
 				*p ^= *src;
 
 				VDEV_RAIDZ_64MUL_2(*q, mask);
 				*q ^= *src;
 
 				VDEV_RAIDZ_64MUL_4(*r, mask);
 				*r ^= *src;
 			}
 
 			/*
 			 * Treat short columns as though they are full of 0s.
 			 * Note that there's therefore nothing needed for P.
 			 */
 			for (; i < pcnt; i++, q++, r++) {
 				VDEV_RAIDZ_64MUL_2(*q, mask);
 				VDEV_RAIDZ_64MUL_4(*r, mask);
 			}
 		}
 	}
 }
 
 /*
  * Generate RAID parity in the first virtual columns according to the number of
  * parity columns available.
  */
 static void
 vdev_raidz_generate_parity(raidz_map_t *rm)
 {
 	switch (rm->rm_firstdatacol) {
 	case 1:
 		vdev_raidz_generate_parity_p(rm);
 		break;
 	case 2:
 		vdev_raidz_generate_parity_pq(rm);
 		break;
 	case 3:
 		vdev_raidz_generate_parity_pqr(rm);
 		break;
 	default:
 		panic("invalid RAID-Z configuration");
 	}
 }
 
 /* BEGIN CSTYLED */
 /*
  * In the general case of reconstruction, we must solve the system of linear
  * equations defined by the coeffecients used to generate parity as well as
  * the contents of the data and parity disks. This can be expressed with
  * vectors for the original data (D) and the actual data (d) and parity (p)
  * and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
  *
  *            __   __                     __     __
  *            |     |         __     __   |  p_0  |
  *            |  V  |         |  D_0  |   | p_m-1 |
  *            |     |    x    |   :   | = |  d_0  |
  *            |  I  |         | D_n-1 |   |   :   |
  *            |     |         ~~     ~~   | d_n-1 |
  *            ~~   ~~                     ~~     ~~
  *
  * I is simply a square identity matrix of size n, and V is a vandermonde
  * matrix defined by the coeffecients we chose for the various parity columns
  * (1, 2, 4). Note that these values were chosen both for simplicity, speedy
  * computation as well as linear separability.
  *
  *      __               __               __     __
  *      |   1   ..  1 1 1 |               |  p_0  |
  *      | 2^n-1 ..  4 2 1 |   __     __   |   :   |
  *      | 4^n-1 .. 16 4 1 |   |  D_0  |   | p_m-1 |
  *      |   1   ..  0 0 0 |   |  D_1  |   |  d_0  |
  *      |   0   ..  0 0 0 | x |  D_2  | = |  d_1  |
  *      |   :       : : : |   |   :   |   |  d_2  |
  *      |   0   ..  1 0 0 |   | D_n-1 |   |   :   |
  *      |   0   ..  0 1 0 |   ~~     ~~   |   :   |
  *      |   0   ..  0 0 1 |               | d_n-1 |
  *      ~~               ~~               ~~     ~~
  *
  * Note that I, V, d, and p are known. To compute D, we must invert the
  * matrix and use the known data and parity values to reconstruct the unknown
  * data values. We begin by removing the rows in V|I and d|p that correspond
  * to failed or missing columns; we then make V|I square (n x n) and d|p
  * sized n by removing rows corresponding to unused parity from the bottom up
  * to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
  * using Gauss-Jordan elimination. In the example below we use m=3 parity
  * columns, n=8 data columns, with errors in d_1, d_2, and p_1:
  *           __                               __
  *           |  1   1   1   1   1   1   1   1  |
  *           | 128  64  32  16  8   4   2   1  | <-----+-+-- missing disks
  *           |  19 205 116  29  64  16  4   1  |      / /
  *           |  1   0   0   0   0   0   0   0  |     / /
  *           |  0   1   0   0   0   0   0   0  | <--' /
  *  (V|I)  = |  0   0   1   0   0   0   0   0  | <---'
  *           |  0   0   0   1   0   0   0   0  |
  *           |  0   0   0   0   1   0   0   0  |
  *           |  0   0   0   0   0   1   0   0  |
  *           |  0   0   0   0   0   0   1   0  |
  *           |  0   0   0   0   0   0   0   1  |
  *           ~~                               ~~
  *           __                               __
  *           |  1   1   1   1   1   1   1   1  |
  *           | 128  64  32  16  8   4   2   1  |
  *           |  19 205 116  29  64  16  4   1  |
  *           |  1   0   0   0   0   0   0   0  |
  *           |  0   1   0   0   0   0   0   0  |
  *  (V|I)' = |  0   0   1   0   0   0   0   0  |
  *           |  0   0   0   1   0   0   0   0  |
  *           |  0   0   0   0   1   0   0   0  |
  *           |  0   0   0   0   0   1   0   0  |
  *           |  0   0   0   0   0   0   1   0  |
  *           |  0   0   0   0   0   0   0   1  |
  *           ~~                               ~~
  *
  * Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
  * have carefully chosen the seed values 1, 2, and 4 to ensure that this
  * matrix is not singular.
  * __                                                                 __
  * |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
  * |  19 205 116  29  64  16  4   1     0   1   0   0   0   0   0   0  |
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  * __                                                                 __
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  1   1   1   1   1   1   1   1     1   0   0   0   0   0   0   0  |
  * |  19 205 116  29  64  16  4   1     0   1   0   0   0   0   0   0  |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  * __                                                                 __
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
  * |  0  205 116  0   0   0   0   0     0   1   19  29  64  16  4   1  |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  * __                                                                 __
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
  * |  0   0  185  0   0   0   0   0    205  1  222 208 141 221 201 204 |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  * __                                                                 __
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  0   1   1   0   0   0   0   0     1   0   1   1   1   1   1   1  |
  * |  0   0   1   0   0   0   0   0    166 100  4   40 158 168 216 209 |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  * __                                                                 __
  * |  1   0   0   0   0   0   0   0     0   0   1   0   0   0   0   0  |
  * |  0   1   0   0   0   0   0   0    167 100  5   41 159 169 217 208 |
  * |  0   0   1   0   0   0   0   0    166 100  4   40 158 168 216 209 |
  * |  0   0   0   1   0   0   0   0     0   0   0   1   0   0   0   0  |
  * |  0   0   0   0   1   0   0   0     0   0   0   0   1   0   0   0  |
  * |  0   0   0   0   0   1   0   0     0   0   0   0   0   1   0   0  |
  * |  0   0   0   0   0   0   1   0     0   0   0   0   0   0   1   0  |
  * |  0   0   0   0   0   0   0   1     0   0   0   0   0   0   0   1  |
  * ~~                                                                 ~~
  *                   __                               __
  *                   |  0   0   1   0   0   0   0   0  |
  *                   | 167 100  5   41 159 169 217 208 |
  *                   | 166 100  4   40 158 168 216 209 |
  *       (V|I)'^-1 = |  0   0   0   1   0   0   0   0  |
  *                   |  0   0   0   0   1   0   0   0  |
  *                   |  0   0   0   0   0   1   0   0  |
  *                   |  0   0   0   0   0   0   1   0  |
  *                   |  0   0   0   0   0   0   0   1  |
  *                   ~~                               ~~
  *
  * We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
  * of the missing data.
  *
  * As is apparent from the example above, the only non-trivial rows in the
  * inverse matrix correspond to the data disks that we're trying to
  * reconstruct. Indeed, those are the only rows we need as the others would
  * only be useful for reconstructing data known or assumed to be valid. For
  * that reason, we only build the coefficients in the rows that correspond to
  * targeted columns.
  */
 /* END CSTYLED */
 
+/*
+ * Fill in the rows of the decode matrix that correspond to the parity
+ * columns listed in map[] (index at most 2, i.e. up to triple parity).
+ * Row i receives descending powers of 2^map[i] in GF(2^8), looked up via
+ * vdev_raidz_pow2[]; exponents are kept reduced modulo 255.
+ */
 static void
 vdev_raidz_matrix_init(raidz_map_t *rm, int n, int nmap, int *map,
     uint8_t **rows)
 {
 	int i, j;
 	int pow;
 
 	ASSERT(n == rm->rm_cols - rm->rm_firstdatacol);
 
 	/*
 	 * Fill in the missing rows of interest.
 	 */
 	for (i = 0; i < nmap; i++) {
 		ASSERT3S(0, <=, map[i]);
 		ASSERT3S(map[i], <=, 2);
 
+		/*
+		 * Start at exponent map[i] * n and step down by map[i] per
+		 * column, wrapping modulo 255 (the order of the multiplicative
+		 * group of GF(2^8)); a single subtraction suffices since
+		 * map[i] <= 2 and n < 255.
+		 */
 		pow = map[i] * n;
 		if (pow > 255)
 			pow -= 255;
 		ASSERT(pow <= 255);
 
 		for (j = 0; j < n; j++) {
 			pow -= map[i];
 			if (pow < 0)
 				pow += 255;
 			rows[i][j] = vdev_raidz_pow2[pow];
 		}
 	}
 }
 
+/*
+ * Compute, by Gauss-Jordan elimination over GF(2^8), the inverse of the
+ * reduced matrix (V|I)' described in the comment above.  "rows" holds the
+ * nmissing non-trivial rows and is consumed in the process; the matching
+ * rows of the inverse are left in "invrows".  "missing" gives the data-column
+ * index of each missing column and "used" lists the surviving columns
+ * (parity entries first, then data entries).
+ */
 static void
 vdev_raidz_matrix_invert(raidz_map_t *rm, int n, int nmissing, int *missing,
     uint8_t **rows, uint8_t **invrows, const uint8_t *used)
 {
 	int i, j, ii, jj;
 	uint8_t log;
 
 	/*
 	 * Assert that the first nmissing entries from the array of used
 	 * columns correspond to parity columns and that subsequent entries
 	 * correspond to data columns.
 	 */
 	for (i = 0; i < nmissing; i++) {
 		ASSERT3S(used[i], <, rm->rm_firstdatacol);
 	}
 	for (; i < n; i++) {
 		ASSERT3S(used[i], >=, rm->rm_firstdatacol);
 	}
 
 	/*
 	 * First initialize the storage where we'll compute the inverse rows.
 	 */
 	for (i = 0; i < nmissing; i++) {
 		for (j = 0; j < n; j++) {
 			invrows[i][j] = (i == j) ? 1 : 0;
 		}
 	}
 
 	/*
 	 * Subtract all trivial rows from the rows of consequence.
 	 */
 	for (i = 0; i < nmissing; i++) {
 		for (j = nmissing; j < n; j++) {
 			ASSERT3U(used[j], >=, rm->rm_firstdatacol);
 			jj = used[j] - rm->rm_firstdatacol;
 			ASSERT3S(jj, <, n);
 			invrows[i][j] = rows[i][jj];
 			rows[i][jj] = 0;
 		}
 	}
 
 	/*
 	 * For each of the rows of interest, we must normalize it and subtract
 	 * a multiple of it from the other rows.
 	 */
 	for (i = 0; i < nmissing; i++) {
 		for (j = 0; j < missing[i]; j++) {
 			ASSERT3U(rows[i][j], ==, 0);
 		}
 		ASSERT3U(rows[i][missing[i]], !=, 0);
 
 		/*
 		 * Compute the inverse of the first element and multiply each
 		 * element in the row by that value.
 		 */
+		/* 255 - log2(x) is the log of x's multiplicative inverse. */
 		log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
 
 		for (j = 0; j < n; j++) {
 			rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
 			invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
 		}
 
+		/* Eliminate this pivot column from every other row (XOR = GF add). */
 		for (ii = 0; ii < nmissing; ii++) {
 			if (i == ii)
 				continue;
 
 			ASSERT3U(rows[ii][missing[i]], !=, 0);
 
 			log = vdev_raidz_log2[rows[ii][missing[i]]];
 
 			for (j = 0; j < n; j++) {
 				rows[ii][j] ^=
 				    vdev_raidz_exp2(rows[i][j], log);
 				invrows[ii][j] ^=
 				    vdev_raidz_exp2(invrows[i][j], log);
 			}
 		}
 	}
 
 	/*
 	 * Verify that the data that is left in the rows are properly part of
 	 * an identity matrix.
 	 */
 	for (i = 0; i < nmissing; i++) {
 		for (j = 0; j < n; j++) {
 			if (j == missing[i]) {
 				ASSERT3U(rows[i][j], ==, 1);
 			} else {
 				ASSERT3U(rows[i][j], ==, 0);
 			}
 		}
 	}
 }
 
+/*
+ * Regenerate the missing data columns by multiplying the inverse-matrix
+ * rows (invrows) with the surviving columns listed in "used".
+ * GF(2^8) multiplication is done with log/exp tables: add the log of the
+ * source byte to the precomputed log of the coefficient, reduce mod 255,
+ * and look the product up in vdev_raidz_pow2[].
+ */
 static void
 vdev_raidz_matrix_reconstruct(raidz_map_t *rm, int n, int nmissing,
     int *missing, uint8_t **invrows, const uint8_t *used)
 {
 	int i, j, x, cc, c;
 	uint8_t *src;
 	uint64_t ccount;
 	uint8_t *dst[VDEV_RAIDZ_MAXPARITY];
 	uint64_t dcount[VDEV_RAIDZ_MAXPARITY];
 	uint8_t log, val;
 	int ll;
 	uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
 	uint8_t *p, *pp;
 	size_t psize;
 
 	log = 0;	/* gcc */
+	/* One allocation carved into nmissing rows of n log coefficients. */
 	psize = sizeof (invlog[0][0]) * n * nmissing;
 	p = zfs_alloc(psize);
 
 	for (pp = p, i = 0; i < nmissing; i++) {
 		invlog[i] = pp;
 		pp += n;
 	}
 
+	/* Precompute the logs of the inverse coefficients for the inner loop. */
 	for (i = 0; i < nmissing; i++) {
 		for (j = 0; j < n; j++) {
 			ASSERT3U(invrows[i][j], !=, 0);
 			invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
 		}
 	}
 
 	for (i = 0; i < n; i++) {
 		c = used[i];
 		ASSERT3U(c, <, rm->rm_cols);
 
 		src = rm->rm_col[c].rc_data;
 		ccount = rm->rm_col[c].rc_size;
 		for (j = 0; j < nmissing; j++) {
 			cc = missing[j] + rm->rm_firstdatacol;
 			ASSERT3U(cc, >=, rm->rm_firstdatacol);
 			ASSERT3U(cc, <, rm->rm_cols);
 			ASSERT3U(cc, !=, c);
 
 			dst[j] = rm->rm_col[cc].rc_data;
 			dcount[j] = rm->rm_col[cc].rc_size;
 		}
 
 		ASSERT(ccount >= rm->rm_col[missing[0]].rc_size || i > 0);
 
 		for (x = 0; x < ccount; x++, src++) {
 			if (*src != 0)
 				log = vdev_raidz_log2[*src];
 
 			for (cc = 0; cc < nmissing; cc++) {
 				if (x >= dcount[cc])
 					continue;
 
 				if (*src == 0) {
 					val = 0;
 				} else {
 					if ((ll = log + invlog[cc][i]) >= 255)
 						ll -= 255;
 					val = vdev_raidz_pow2[ll];
 				}
 
+				/* First source column seeds dst; later ones accumulate. */
 				if (i == 0)
 					dst[cc][x] = val;
 				else
 					dst[cc][x] ^= val;
 			}
 		}
 	}
 
 	zfs_free(p, psize);
 }
 
+/*
+ * Reconstruct an arbitrary set of missing columns using the general
+ * matrix-inversion method described in the block comment above.  Returns
+ * a non-zero bitmask ("code") of the parity columns that were used.
+ */
 static int
 vdev_raidz_reconstruct_general(raidz_map_t *rm, int *tgts, int ntgts)
 {
 	int n, i, c, t, tt;
 	int nmissing_rows;
 	int missing_rows[VDEV_RAIDZ_MAXPARITY];
 	int parity_map[VDEV_RAIDZ_MAXPARITY];
 
 	uint8_t *p, *pp;
 	size_t psize;
 
 	uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
 	uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
 	uint8_t *used;
 
 	int code = 0;
 
 
 	n = rm->rm_cols - rm->rm_firstdatacol;
 
 	/*
 	 * Figure out which data columns are missing.
 	 */
 	nmissing_rows = 0;
 	for (t = 0; t < ntgts; t++) {
 		if (tgts[t] >= rm->rm_firstdatacol) {
 			missing_rows[nmissing_rows++] =
 			    tgts[t] - rm->rm_firstdatacol;
 		}
 	}
 
 	/*
 	 * Figure out which parity columns to use to help generate the missing
 	 * data columns.
 	 */
 	for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
 		ASSERT(tt < ntgts);
 		ASSERT(c < rm->rm_firstdatacol);
 
 		/*
 		 * Skip any targeted parity columns.
 		 */
 		if (c == tgts[tt]) {
 			tt++;
 			continue;
 		}
 
 		code |= 1 << c;
 
 		parity_map[i] = c;
 		i++;
 	}
 
 	ASSERT(code != 0);
 	ASSERT3U(code, <, 1 << VDEV_RAIDZ_MAXPARITY);
 
+	/* One allocation backs rows[], invrows[] and the used[] column list. */
 	psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
 	    nmissing_rows * n + sizeof (used[0]) * n;
 	p = kmem_alloc(psize, KM_SLEEP);
 
 	for (pp = p, i = 0; i < nmissing_rows; i++) {
 		rows[i] = pp;
 		pp += n;
 		invrows[i] = pp;
 		pp += n;
 	}
 	used = pp;
 
+	/* used[] lists the surviving parity columns first ... */
 	for (i = 0; i < nmissing_rows; i++) {
 		used[i] = parity_map[i];
 	}
 
+	/* ... followed by the surviving data columns. */
 	for (tt = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
 		if (tt < nmissing_rows &&
 		    c == missing_rows[tt] + rm->rm_firstdatacol) {
 			tt++;
 			continue;
 		}
 
 		ASSERT3S(i, <, n);
 		used[i] = c;
 		i++;
 	}
 
 	/*
 	 * Initialize the interesting rows of the matrix.
 	 */
 	vdev_raidz_matrix_init(rm, n, nmissing_rows, parity_map, rows);
 
 	/*
 	 * Invert the matrix.
 	 */
 	vdev_raidz_matrix_invert(rm, n, nmissing_rows, missing_rows, rows,
 	    invrows, used);
 
 	/*
 	 * Reconstruct the missing data using the generated matrix.
 	 */
 	vdev_raidz_matrix_reconstruct(rm, n, nmissing_rows, missing_rows,
 	    invrows, used);
 
 	kmem_free(p, psize);
 
 	return (code);
 }
 
+/*
+ * Top-level reconstruction entry point: merge the caller-specified target
+ * columns (t, which must be sorted ascending) with any columns already
+ * marked in error, then hand the combined list to
+ * vdev_raidz_reconstruct_general().  Returns that function's parity-usage
+ * bitmask.
+ */
 static int
 vdev_raidz_reconstruct(raidz_map_t *rm, int *t, int nt)
 {
 	int tgts[VDEV_RAIDZ_MAXPARITY];
 	int ntgts;
 	int i, c;
 	int code;
 	int nbadparity, nbaddata;
 
 	/*
 	 * The tgts list must already be sorted.
 	 */
 	for (i = 1; i < nt; i++) {
 		ASSERT(t[i] > t[i - 1]);
 	}
 
+	/* Start pessimistic and subtract the columns found to be healthy. */
 	nbadparity = rm->rm_firstdatacol;
 	nbaddata = rm->rm_cols - nbadparity;
 	ntgts = 0;
 	for (i = 0, c = 0; c < rm->rm_cols; c++) {
 		if (i < nt && c == t[i]) {
 			tgts[ntgts++] = c;
 			i++;
 		} else if (rm->rm_col[c].rc_error != 0) {
 			tgts[ntgts++] = c;
 		} else if (c >= rm->rm_firstdatacol) {
 			nbaddata--;
 		} else {
 			nbadparity--;
 		}
 	}
 
 	ASSERT(ntgts >= nt);
 	ASSERT(nbaddata >= 0);
 	ASSERT(nbaddata + nbadparity == ntgts);
 
 	code = vdev_raidz_reconstruct_general(rm, tgts, ntgts);
 	ASSERT(code < (1 << VDEV_RAIDZ_MAXPARITY));
 	ASSERT(code > 0);
 	return (code);
 }
 
+/*
+ * Map a logical I/O (byte offset/size) onto raidz columns.  "unit_shift"
+ * is the device sector shift, "dcols" the number of children and "nparity"
+ * the number of parity columns.  Parity columns get scratch buffers;
+ * data columns point directly into the caller's "data" buffer.
+ */
 static raidz_map_t *
 vdev_raidz_map_alloc(void *data, off_t offset, size_t size, uint64_t unit_shift,
     uint64_t dcols, uint64_t nparity)
 {
 	raidz_map_t *rm;
 	uint64_t b = offset >> unit_shift;
 	uint64_t s = size >> unit_shift;
 	uint64_t f = b % dcols;
 	uint64_t o = (b / dcols) << unit_shift;
 	uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
 
+	/*
+	 * q: sectors per data column in the short rows; r: leftover sectors;
+	 * bc: columns belonging to the "big" (one-sector-longer) rows;
+	 * tot: total sectors including parity.
+	 */
 	q = s / (dcols - nparity);
 	r = s - q * (dcols - nparity);
 	bc = (r == 0 ? 0 : r + nparity);
 	tot = s + nparity * (q + (r == 0 ? 0 : 1));
 
 	if (q == 0) {
 		acols = bc;
 		scols = MIN(dcols, roundup(bc, nparity + 1));
 	} else {
 		acols = dcols;
 		scols = dcols;
 	}
 
 	ASSERT3U(acols, <=, scols);
 
 	rm = zfs_alloc(offsetof(raidz_map_t, rm_col[scols]));
 
 	rm->rm_cols = acols;
 	rm->rm_scols = scols;
 	rm->rm_bigcols = bc;
 	rm->rm_skipstart = bc;
 	rm->rm_missingdata = 0;
 	rm->rm_missingparity = 0;
 	rm->rm_firstdatacol = nparity;
 	rm->rm_reports = 0;
 	rm->rm_freed = 0;
 	rm->rm_ecksuminjected = 0;
 
 	asize = 0;
 
 	for (c = 0; c < scols; c++) {
 		col = f + c;
 		coff = o;
+		/* Wrap to the first child and advance one sector down it. */
 		if (col >= dcols) {
 			col -= dcols;
 			coff += 1ULL << unit_shift;
 		}
 		rm->rm_col[c].rc_devidx = col;
 		rm->rm_col[c].rc_offset = coff;
 		rm->rm_col[c].rc_data = NULL;
 		rm->rm_col[c].rc_error = 0;
 		rm->rm_col[c].rc_tried = 0;
 		rm->rm_col[c].rc_skipped = 0;
 
 		if (c >= acols)
 			rm->rm_col[c].rc_size = 0;
 		else if (c < bc)
 			rm->rm_col[c].rc_size = (q + 1) << unit_shift;
 		else
 			rm->rm_col[c].rc_size = q << unit_shift;
 
 		asize += rm->rm_col[c].rc_size;
 	}
 
 	ASSERT3U(asize, ==, tot << unit_shift);
 	rm->rm_asize = roundup(asize, (nparity + 1) << unit_shift);
 	rm->rm_nskip = roundup(tot, nparity + 1) - tot;
 	ASSERT3U(rm->rm_asize - asize, ==, rm->rm_nskip << unit_shift);
 	ASSERT3U(rm->rm_nskip, <=, nparity);
 
+	/* Parity columns get their own buffers; data columns share "data". */
 	for (c = 0; c < rm->rm_firstdatacol; c++)
 		rm->rm_col[c].rc_data = zfs_alloc(rm->rm_col[c].rc_size);
 
 	rm->rm_col[c].rc_data = data;
 
 	for (c = c + 1; c < acols; c++)
 		rm->rm_col[c].rc_data = (char *)rm->rm_col[c - 1].rc_data +
 		    rm->rm_col[c - 1].rc_size;
 
 	/*
 	 * If all data stored spans all columns, there's a danger that parity
 	 * will always be on the same device and, since parity isn't read
 	 * during normal operation, that that device's I/O bandwidth won't be
 	 * used effectively. We therefore switch the parity every 1MB.
 	 *
 	 * ... at least that was, ostensibly, the theory. As a practical
 	 * matter unless we juggle the parity between all devices evenly, we
 	 * won't see any benefit. Further, occasional writes that aren't a
 	 * multiple of the LCM of the number of children and the minimum
 	 * stripe width are sufficient to avoid pessimal behavior.
 	 * Unfortunately, this decision created an implicit on-disk format
 	 * requirement that we need to support for all eternity, but only
 	 * for single-parity RAID-Z.
 	 *
 	 * If we intend to skip a sector in the zeroth column for padding
 	 * we must make sure to note this swap. We will never intend to
 	 * skip the first column since at least one data and one parity
 	 * column must appear in each row.
 	 */
 	ASSERT(rm->rm_cols >= 2);
 	ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size);
 
 	if (rm->rm_firstdatacol == 1 && (offset & (1ULL << 20))) {
 		devidx = rm->rm_col[0].rc_devidx;
 		o = rm->rm_col[0].rc_offset;
 		rm->rm_col[0].rc_devidx = rm->rm_col[1].rc_devidx;
 		rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset;
 		rm->rm_col[1].rc_devidx = devidx;
 		rm->rm_col[1].rc_offset = o;
 
 		if (rm->rm_skipstart == 0)
 			rm->rm_skipstart = 1;
 	}
 
 	return (rm);
 }
 
+/*
+ * Release the parity scratch buffers allocated by vdev_raidz_map_alloc()
+ * and then the map structure itself.  (Data columns point into the
+ * caller's buffer and are not freed here.)
+ */
 static void
 vdev_raidz_map_free(raidz_map_t *rm)
 {
 	int c;
 
 	for (c = rm->rm_firstdatacol - 1; c >= 0; c--)
 		zfs_free(rm->rm_col[c].rc_data, rm->rm_col[c].rc_size);
 
 	zfs_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_scols]));
 }
 
+/*
+ * Look up a child vdev of "pvd" by its device index.  Returns NULL when
+ * no child with that id exists (STAILQ_FOREACH leaves cvd NULL at end).
+ */
 static vdev_t *
 vdev_child(vdev_t *pvd, uint64_t devidx)
 {
 	vdev_t *cvd;
 
 	STAILQ_FOREACH(cvd, &pvd->v_children, v_childlink) {
 		if (cvd->v_id == devidx)
 			break;
 	}
 
 	return (cvd);
 }
 
 /*
  * We keep track of whether or not there were any injected errors, so that
  * any ereports we generate can note it.
  */
 static int
-raidz_checksum_verify(const blkptr_t *bp, void *data, uint64_t size)
+raidz_checksum_verify(const spa_t *spa, const blkptr_t *bp, void *data,
+    uint64_t size)
 {
-
-	return (zio_checksum_verify(bp, data));
+	/* "size" is accepted but unused; the checksum is driven by "bp". */
+	return (zio_checksum_verify(spa, bp, data));
 }
 
 /*
  * Generate the parity from the data columns. If we tried and were able to
  * read the parity without error, verify that the generated parity matches the
  * data we read. If it doesn't, we fire off a checksum error. Return the
  * number of such failures.
  */
 static int
 raidz_parity_verify(raidz_map_t *rm)
 {
 	void *orig[VDEV_RAIDZ_MAXPARITY];
 	int c, ret = 0;
 	raidz_col_t *rc;
 
+	/* Snapshot the parity we actually read so it can be compared below. */
 	for (c = 0; c < rm->rm_firstdatacol; c++) {
 		rc = &rm->rm_col[c];
 		if (!rc->rc_tried || rc->rc_error != 0)
 			continue;
 		orig[c] = zfs_alloc(rc->rc_size);
 		bcopy(rc->rc_data, orig[c], rc->rc_size);
 	}
 
+	/* Recompute parity in place from the data columns. */
 	vdev_raidz_generate_parity(rm);
 
 	for (c = rm->rm_firstdatacol - 1; c >= 0; c--) {
 		rc = &rm->rm_col[c];
 		if (!rc->rc_tried || rc->rc_error != 0)
 			continue;
 		if (bcmp(orig[c], rc->rc_data, rc->rc_size) != 0) {
 			rc->rc_error = ECKSUM;
 			ret++;
 		}
 		zfs_free(orig[c], rc->rc_size);
 	}
 
 	return (ret);
 }
 
 /*
  * Iterate over all combinations of bad data and attempt a reconstruction.
  * Note that the algorithm below is non-optimal because it doesn't take into
  * account how reconstruction is actually performed. For example, with
  * triple-parity RAID-Z the reconstruction procedure is the same if column 4
  * is targeted as invalid as if columns 1 and 4 are targeted since in both
  * cases we'd only use parity information in column 0.
  */
 static int
-vdev_raidz_combrec(raidz_map_t *rm, const blkptr_t *bp, void *data,
-    off_t offset, uint64_t bytes, int total_errors, int data_errors)
+vdev_raidz_combrec(const spa_t *spa, raidz_map_t *rm, const blkptr_t *bp,
+    void *data, off_t offset, uint64_t bytes, int total_errors, int data_errors)
 {
 	raidz_col_t *rc;
 	void *orig[VDEV_RAIDZ_MAXPARITY];
 	int tstore[VDEV_RAIDZ_MAXPARITY + 2];
 	int *tgts = &tstore[1];
 	int current, next, i, c, n;
 	int code, ret = 0;
 
 	ASSERT(total_errors < rm->rm_firstdatacol);
 
 	/*
 	 * This simplifies one edge condition.
 	 */
 	tgts[-1] = -1;
 
+	/*
+	 * n is the number of columns simultaneously assumed bad; try each n
+	 * from 1 up to the parity still available beyond the known errors.
+	 */
 	for (n = 1; n <= rm->rm_firstdatacol - total_errors; n++) {
 		/*
 		 * Initialize the targets array by finding the first n columns
 		 * that contain no error.
 		 *
 		 * If there were no data errors, we need to ensure that we're
 		 * always explicitly attempting to reconstruct at least one
 		 * data column. To do this, we simply push the highest target
 		 * up into the data columns.
 		 */
 		for (c = 0, i = 0; i < n; i++) {
 			if (i == n - 1 && data_errors == 0 &&
 			    c < rm->rm_firstdatacol) {
 				c = rm->rm_firstdatacol;
 			}
 
 			while (rm->rm_col[c].rc_error != 0) {
 				c++;
 				ASSERT3S(c, <, rm->rm_cols);
 			}
 
 			tgts[i] = c++;
 		}
 
 		/*
 		 * Setting tgts[n] simplifies the other edge condition.
 		 */
 		tgts[n] = rm->rm_cols;
 
 		/*
 		 * These buffers were allocated in previous iterations.
 		 */
 		for (i = 0; i < n - 1; i++) {
 			ASSERT(orig[i] != NULL);
 		}
 
 		orig[n - 1] = zfs_alloc(rm->rm_col[0].rc_size);
 
 		current = 0;
 		next = tgts[current];
 
 		while (current != n) {
 			tgts[current] = next;
 			current = 0;
 
 			/*
 			 * Save off the original data that we're going to
 			 * attempt to reconstruct.
 			 */
 			for (i = 0; i < n; i++) {
 				ASSERT(orig[i] != NULL);
 				c = tgts[i];
 				ASSERT3S(c, >=, 0);
 				ASSERT3S(c, <, rm->rm_cols);
 				rc = &rm->rm_col[c];
 				bcopy(rc->rc_data, orig[i], rc->rc_size);
 			}
 
 			/*
 			 * Attempt a reconstruction and exit the outer loop on
 			 * success.
 			 */
 			code = vdev_raidz_reconstruct(rm, tgts, n);
-			if (raidz_checksum_verify(bp, data, bytes) == 0) {
+			if (raidz_checksum_verify(spa, bp, data, bytes) == 0) {
+				/* Mark the reconstructed columns as bad. */
 				for (i = 0; i < n; i++) {
 					c = tgts[i];
 					rc = &rm->rm_col[c];
 					ASSERT(rc->rc_error == 0);
 					rc->rc_error = ECKSUM;
 				}
 
 				ret = code;
 				goto done;
 			}
 
 			/*
 			 * Restore the original data.
 			 */
 			for (i = 0; i < n; i++) {
 				c = tgts[i];
 				rc = &rm->rm_col[c];
 				bcopy(orig[i], rc->rc_data, rc->rc_size);
 			}
 
 			do {
 				/*
 				 * Find the next valid column after the current
 				 * position..
 				 */
 				for (next = tgts[current] + 1;
 				    next < rm->rm_cols &&
 				    rm->rm_col[next].rc_error != 0; next++)
 					continue;
 
 				ASSERT(next <= tgts[current + 1]);
 
 				/*
 				 * If that spot is available, we're done here.
 				 */
 				if (next != tgts[current + 1])
 					break;
 
 				/*
 				 * Otherwise, find the next valid column after
 				 * the previous position.
 				 */
 				for (c = tgts[current - 1] + 1;
 				    rm->rm_col[c].rc_error != 0; c++)
 					continue;
 
 				tgts[current] = c;
 				current++;
 
 			} while (current != n);
 		}
 	}
 	n--;
 done:
 	for (i = n - 1; i >= 0; i--) {
 		zfs_free(orig[i], rm->rm_col[0].rc_size);
 	}
 
 	return (ret);
 }
 
+/*
+ * Read "bytes" at "offset" from a raidz top-level vdev into "data",
+ * reconstructing from parity when needed.  Returns 0 on success, EIO when
+ * more columns failed than parity can cover, or ECKSUM when no combination
+ * of surviving columns produces data with a valid checksum.
+ */
 static int
 vdev_raidz_read(vdev_t *vd, const blkptr_t *bp, void *data,
     off_t offset, size_t bytes)
 {
 	vdev_t *tvd = vd->v_top;
 	vdev_t *cvd;
 	raidz_map_t *rm;
 	raidz_col_t *rc;
 	int c, error;
 	int unexpected_errors;
 	int parity_errors;
 	int parity_untried;
 	int data_errors;
 	int total_errors;
 	int n;
 	int tgts[VDEV_RAIDZ_MAXPARITY];
 	int code;
 
 	rc = NULL;	/* gcc */
 	error = 0;
 
 	rm = vdev_raidz_map_alloc(data, offset, bytes, tvd->v_ashift,
 	    vd->v_nchildren, vd->v_nparity);
 
 	/*
 	 * Iterate over the columns in reverse order so that we hit the parity
 	 * last -- any errors along the way will force us to read the parity.
 	 */
 	for (c = rm->rm_cols - 1; c >= 0; c--) {
 		rc = &rm->rm_col[c];
 		cvd = vdev_child(vd, rc->rc_devidx);
 		if (cvd == NULL || cvd->v_state != VDEV_STATE_HEALTHY) {
 			if (c >= rm->rm_firstdatacol)
 				rm->rm_missingdata++;
 			else
 				rm->rm_missingparity++;
 			rc->rc_error = ENXIO;
 			rc->rc_tried = 1;	/* don't even try */
 			rc->rc_skipped = 1;
 			continue;
 		}
 #if 0		/* XXX: Too hard for the boot code. */
 		if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
 			if (c >= rm->rm_firstdatacol)
 				rm->rm_missingdata++;
 			else
 				rm->rm_missingparity++;
 			rc->rc_error = ESTALE;
 			rc->rc_skipped = 1;
 			continue;
 		}
 #endif
+		/* Parity is only read up front if some data is already missing. */
 		if (c >= rm->rm_firstdatacol || rm->rm_missingdata > 0) {
 			rc->rc_error = cvd->v_read(cvd, NULL, rc->rc_data,
 			    rc->rc_offset, rc->rc_size);
 			rc->rc_tried = 1;
 			rc->rc_skipped = 0;
 		}
 	}
 
 reconstruct:
+	/* Tally errors by category for the decision logic below. */
 	unexpected_errors = 0;
 	parity_errors = 0;
 	parity_untried = 0;
 	data_errors = 0;
 	total_errors = 0;
 
 	ASSERT(rm->rm_missingparity <= rm->rm_firstdatacol);
 	ASSERT(rm->rm_missingdata <= rm->rm_cols - rm->rm_firstdatacol);
 
 	for (c = 0; c < rm->rm_cols; c++) {
 		rc = &rm->rm_col[c];
 
 		if (rc->rc_error) {
 			ASSERT(rc->rc_error != ECKSUM);	/* child has no bp */
 
 			if (c < rm->rm_firstdatacol)
 				parity_errors++;
 			else
 				data_errors++;
 
 			if (!rc->rc_skipped)
 				unexpected_errors++;
 
 			total_errors++;
 		} else if (c < rm->rm_firstdatacol && !rc->rc_tried) {
 			parity_untried++;
 		}
 	}
 
 	/*
 	 * There are three potential phases for a read:
 	 *	1. produce valid data from the columns read
 	 *	2. read all disks and try again
 	 *	3. perform combinatorial reconstruction
 	 *
 	 * Each phase is progressively both more expensive and less likely to
 	 * occur. If we encounter more errors than we can repair or all phases
 	 * fail, we have no choice but to return an error.
 	 */
 
 	/*
 	 * If the number of errors we saw was correctable -- less than or equal
 	 * to the number of parity disks read -- attempt to produce data that
 	 * has a valid checksum. Naturally, this case applies in the absence of
 	 * any errors.
 	 */
 	if (total_errors <= rm->rm_firstdatacol - parity_untried) {
 		if (data_errors == 0) {
-			if (raidz_checksum_verify(bp, data, bytes) == 0) {
+			if (raidz_checksum_verify(vd->spa, bp, data, bytes) == 0) {
 				/*
 				 * If we read parity information (unnecessarily
 				 * as it happens since no reconstruction was
 				 * needed) regenerate and verify the parity.
 				 * We also regenerate parity when resilvering
 				 * so we can write it out to the failed device
 				 * later.
 				 */
 				if (parity_errors + parity_untried <
 				    rm->rm_firstdatacol) {
 					n = raidz_parity_verify(rm);
 					unexpected_errors += n;
 					ASSERT(parity_errors + n <=
 					    rm->rm_firstdatacol);
 				}
 				goto done;
 			}
 		} else {
 			/*
 			 * We either attempt to read all the parity columns or
 			 * none of them. If we didn't try to read parity, we
 			 * wouldn't be here in the correctable case. There must
 			 * also have been fewer parity errors than parity
 			 * columns or, again, we wouldn't be in this code path.
 			 */
 			ASSERT(parity_untried == 0);
 			ASSERT(parity_errors < rm->rm_firstdatacol);
 
 			/*
 			 * Identify the data columns that reported an error.
 			 */
 			n = 0;
 			for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
 				rc = &rm->rm_col[c];
 				if (rc->rc_error != 0) {
 					ASSERT(n < VDEV_RAIDZ_MAXPARITY);
 					tgts[n++] = c;
 				}
 			}
 
 			ASSERT(rm->rm_firstdatacol >= n);
 
 			code = vdev_raidz_reconstruct(rm, tgts, n);
 
-			if (raidz_checksum_verify(bp, data, bytes) == 0) {
+			if (raidz_checksum_verify(vd->spa, bp, data, bytes) == 0) {
 				/*
 				 * If we read more parity disks than were used
 				 * for reconstruction, confirm that the other
 				 * parity disks produced correct data. This
 				 * routine is suboptimal in that it regenerates
 				 * the parity that we already used in addition
 				 * to the parity that we're attempting to
 				 * verify, but this should be a relatively
 				 * uncommon case, and can be optimized if it
 				 * becomes a problem. Note that we regenerate
 				 * parity when resilvering so we can write it
 				 * out to failed devices later.
 				 */
 				if (parity_errors < rm->rm_firstdatacol - n) {
 					n = raidz_parity_verify(rm);
 					unexpected_errors += n;
 					ASSERT(parity_errors + n <=
 					    rm->rm_firstdatacol);
 				}
 
 				goto done;
 			}
 		}
 	}
 
 	/*
 	 * This isn't a typical situation -- either we got a read
 	 * error or a child silently returned bad data. Read every
 	 * block so we can try again with as much data and parity as
 	 * we can track down. If we've already been through once
 	 * before, all children will be marked as tried so we'll
 	 * proceed to combinatorial reconstruction.
 	 */
 	unexpected_errors = 1;
 	rm->rm_missingdata = 0;
 	rm->rm_missingparity = 0;
 
+	/* Phase 2: read every column that has not yet been tried. */
 	n = 0;
 	for (c = 0; c < rm->rm_cols; c++) {
 		rc = &rm->rm_col[c];
 
 		if (rc->rc_tried)
 			continue;
 
 		cvd = vdev_child(vd, rc->rc_devidx);
 		ASSERT(cvd != NULL);
 		rc->rc_error = cvd->v_read(cvd, NULL,
 		    rc->rc_data, rc->rc_offset, rc->rc_size);
 		if (rc->rc_error == 0)
 			n++;
 		rc->rc_tried = 1;
 		rc->rc_skipped = 0;
 	}
 	/*
 	 * If we managed to read anything more, retry the
 	 * reconstruction.
 	 */
 	if (n > 0)
 		goto reconstruct;
 
 	/*
 	 * At this point we've attempted to reconstruct the data given the
 	 * errors we detected, and we've attempted to read all columns. There
 	 * must, therefore, be one or more additional problems -- silent errors
 	 * resulting in invalid data rather than explicit I/O errors resulting
 	 * in absent data. We check if there is enough additional data to
 	 * possibly reconstruct the data and then perform combinatorial
 	 * reconstruction over all possible combinations. If that fails,
 	 * we're cooked.
 	 */
 	if (total_errors > rm->rm_firstdatacol) {
 		error = EIO;
 	} else if (total_errors < rm->rm_firstdatacol &&
-	    (code = vdev_raidz_combrec(rm, bp, data, offset, bytes,
+	    (code = vdev_raidz_combrec(vd->spa, rm, bp, data, offset, bytes,
 	     total_errors, data_errors)) != 0) {
 		/*
 		 * If we didn't use all the available parity for the
 		 * combinatorial reconstruction, verify that the remaining
 		 * parity is correct.
 		 */
 		if (code != (1 << rm->rm_firstdatacol) - 1)
 			(void) raidz_parity_verify(rm);
 	} else {
 		/*
 		 * We're here because either:
 		 *
 		 *	total_errors == rm_first_datacol, or
 		 *	vdev_raidz_combrec() failed
 		 *
 		 * In either case, there is enough bad data to prevent
 		 * reconstruction.
 		 *
 		 * Start checksum ereports for all children which haven't
 		 * failed, and the IO wasn't speculative.
 		 */
 		error = ECKSUM;
 	}
 
 done:
 	vdev_raidz_map_free(rm);
 
 	return (error);
 }