Index: head/stand/efi/libefi/Makefile =================================================================== --- head/stand/efi/libefi/Makefile (revision 362430) +++ head/stand/efi/libefi/Makefile (revision 362431) @@ -1,69 +1,71 @@ # $FreeBSD$ .include LIB= efi WARNS?= 2 SRCS= delay.c \ devicename.c \ devpath.c \ efi_console.c \ efi_driver_utils.c \ efichar.c \ efienv.c \ efihttp.c \ efinet.c \ efipart.c \ efizfs.c \ env.c \ errno.c \ handles.c \ libefi.c \ wchar.c .PATH: ${SYSDIR}/teken SRCS+= teken.c .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" SRCS+= time.c .elif ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "arm" SRCS+= time_event.c .endif # We implement a slightly non-standard %S in that it always takes a # CHAR16 that's common in UEFI-land instead of a wchar_t. This only # seems to matter on arm64 where wchar_t defaults to an int instead # of a short. There's no good cast to use here so just ignore the # warnings for now. CWARNFLAGS.efinet.c+= -Wno-format CWARNFLAGS.efipart.c+= -Wno-format CWARNFLAGS.env.c+= -Wno-format .if ${MACHINE_CPUARCH} == "aarch64" CFLAGS+= -mgeneral-regs-only .endif .if ${MACHINE_ARCH} == "amd64" CFLAGS+= -fPIC -mno-red-zone .endif CFLAGS+= -I${EFIINC} CFLAGS+= -I${EFIINCMD} CFLAGS.efi_console.c+= -I${SRCTOP}/sys/teken CFLAGS.teken.c+= -I${SRCTOP}/sys/teken .if ${MK_LOADER_ZFS} != "no" CFLAGS+= -I${ZFSSRC} +CFLAGS+= -I${SYSDIR}/cddl/boot/zfs +CFLAGS+= -I${SYSDIR}/cddl/contrib/opensolaris/uts/common CFLAGS+= -DEFI_ZFS_BOOT .endif # Pick up the bootstrap header for some interface items CFLAGS+= -I${LDRSRC} # Handle FreeBSD specific %b and %D printf format specifiers CFLAGS+= ${FORMAT_EXTENSIONS} # Do not use TERM_EMU on arm and arm64 as it doesn't behave well with serial console .if ${MACHINE_CPUARCH} != "arm" && ${MACHINE_CPUARCH} != "aarch64" CFLAGS+= -DTERM_EMU .endif .include Index: head/stand/efi/loader/main.c =================================================================== --- head/stand/efi/loader/main.c (revision 362430) +++ head/stand/efi/loader/main.c (revision 362431) @@ -1,1575 +1,1588 @@ /*- * Copyright (c) 2008-2010 Rui Paulo * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Copyright (c) 2016-2019 Netflix, Inc. written by M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "efizfs.h" #include "loader_efi.h" struct arch_switch archsw; /* MI/MD interface boundary */ EFI_GUID acpi = ACPI_TABLE_GUID; EFI_GUID acpi20 = ACPI_20_TABLE_GUID; EFI_GUID devid = DEVICE_PATH_PROTOCOL; EFI_GUID imgid = LOADED_IMAGE_PROTOCOL; EFI_GUID mps = MPS_TABLE_GUID; EFI_GUID netid = EFI_SIMPLE_NETWORK_PROTOCOL; EFI_GUID smbios = SMBIOS_TABLE_GUID; EFI_GUID smbios3 = SMBIOS3_TABLE_GUID; EFI_GUID dxe = DXE_SERVICES_TABLE_GUID; EFI_GUID hoblist = HOB_LIST_TABLE_GUID; EFI_GUID lzmadecomp = LZMA_DECOMPRESSION_GUID; EFI_GUID mpcore = ARM_MP_CORE_INFO_TABLE_GUID; EFI_GUID esrt = ESRT_TABLE_GUID; EFI_GUID memtype = MEMORY_TYPE_INFORMATION_TABLE_GUID; EFI_GUID debugimg = DEBUG_IMAGE_INFO_TABLE_GUID; EFI_GUID fdtdtb = FDT_TABLE_GUID; EFI_GUID inputid = SIMPLE_TEXT_INPUT_PROTOCOL; /* * Number of seconds to wait for a keystroke before exiting with failure * in the event no currdev is found. -2 means always break, -1 means * never break, 0 means poll once and then reboot, > 0 means wait for * that many seconds. "fail_timeout" can be set in the environment as * well. */ static int fail_timeout = 5; /* * Current boot variable */ UINT16 boot_current; /* * Image that we booted from. */ EFI_LOADED_IMAGE *boot_img; static bool has_keyboard(void) { EFI_STATUS status; EFI_DEVICE_PATH *path; EFI_HANDLE *hin, *hin_end, *walker; UINTN sz; bool retval = false; /* * Find all the handles that support the SIMPLE_TEXT_INPUT_PROTOCOL and * do the typical dance to get the right sized buffer. */ sz = 0; hin = NULL; status = BS->LocateHandle(ByProtocol, &inputid, 0, &sz, 0); if (status == EFI_BUFFER_TOO_SMALL) { hin = (EFI_HANDLE *)malloc(sz); status = BS->LocateHandle(ByProtocol, &inputid, 0, &sz, hin); if (EFI_ERROR(status)) free(hin); } if (EFI_ERROR(status)) return retval; /* * Look at each of the handles. If it supports the device path protocol, * use it to get the device path for this handle. Then see if that * device path matches either the USB device path for keyboards or the * legacy device path for keyboards. */ hin_end = &hin[sz / sizeof(*hin)]; for (walker = hin; walker < hin_end; walker++) { status = OpenProtocolByHandle(*walker, &devid, (void **)&path); if (EFI_ERROR(status)) continue; while (!IsDevicePathEnd(path)) { /* * Check for the ACPI keyboard node. All PNP3xx nodes * are keyboards of different flavors. Note: It is * unclear of there's always a keyboard node when * there's a keyboard controller, or if there's only one * when a keyboard is detected at boot. */ if (DevicePathType(path) == ACPI_DEVICE_PATH && (DevicePathSubType(path) == ACPI_DP || DevicePathSubType(path) == ACPI_EXTENDED_DP)) { ACPI_HID_DEVICE_PATH *acpi; acpi = (ACPI_HID_DEVICE_PATH *)(void *)path; if ((EISA_ID_TO_NUM(acpi->HID) & 0xff00) == 0x300 && (acpi->HID & 0xffff) == PNP_EISA_ID_CONST) { retval = true; goto out; } /* * Check for USB keyboard node, if present. Unlike a * PS/2 keyboard, these definitely only appear when * connected to the system. 
*/ } else if (DevicePathType(path) == MESSAGING_DEVICE_PATH && DevicePathSubType(path) == MSG_USB_CLASS_DP) { USB_CLASS_DEVICE_PATH *usb; usb = (USB_CLASS_DEVICE_PATH *)(void *)path; if (usb->DeviceClass == 3 && /* HID */ usb->DeviceSubClass == 1 && /* Boot devices */ usb->DeviceProtocol == 1) { /* Boot keyboards */ retval = true; goto out; } } path = NextDevicePathNode(path); } } out: free(hin); return retval; } static void set_currdev(const char *devname) { /* * Don't execute hooks here; we may need to try setting these more than * once here if we're probing for the ZFS pool we're supposed to boot. * The currdev hook is intended to just validate user input anyways, * while the loaddev hook makes it immutable once we've determined what * the proper currdev is. */ env_setenv("currdev", EV_VOLATILE | EV_NOHOOK, devname, efi_setcurrdev, env_nounset); env_setenv("loaddev", EV_VOLATILE | EV_NOHOOK, devname, env_noset, env_nounset); } static void set_currdev_devdesc(struct devdesc *currdev) { const char *devname; devname = efi_fmtdev(currdev); printf("Setting currdev to %s\n", devname); set_currdev(devname); } static void set_currdev_devsw(struct devsw *dev, int unit) { struct devdesc currdev; currdev.d_dev = dev; currdev.d_unit = unit; set_currdev_devdesc(&currdev); } static void set_currdev_pdinfo(pdinfo_t *dp) { /* * Disks are special: they have partitions. if the parent * pointer is non-null, we're a partition not a full disk * and we need to adjust currdev appropriately. */ if (dp->pd_devsw->dv_type == DEVT_DISK) { struct disk_devdesc currdev; currdev.dd.d_dev = dp->pd_devsw; if (dp->pd_parent == NULL) { currdev.dd.d_unit = dp->pd_unit; currdev.d_slice = D_SLICENONE; currdev.d_partition = D_PARTNONE; } else { currdev.dd.d_unit = dp->pd_parent->pd_unit; currdev.d_slice = dp->pd_unit; currdev.d_partition = D_PARTISGPT; /* XXX Assumes GPT */ } set_currdev_devdesc((struct devdesc *)&currdev); } else { set_currdev_devsw(dp->pd_devsw, dp->pd_unit); } } static bool sanity_check_currdev(void) { struct stat st; return (stat(PATH_DEFAULTS_LOADER_CONF, &st) == 0 || #ifdef PATH_BOOTABLE_TOKEN stat(PATH_BOOTABLE_TOKEN, &st) == 0 || /* non-standard layout */ #endif stat(PATH_KERNEL, &st) == 0); } #ifdef EFI_ZFS_BOOT static bool probe_zfs_currdev(uint64_t guid) { char *devname; struct zfs_devdesc currdev; + char *buf = NULL; + bool rv; currdev.dd.d_dev = &zfs_dev; currdev.dd.d_unit = 0; currdev.pool_guid = guid; currdev.root_guid = 0; set_currdev_devdesc((struct devdesc *)&currdev); devname = efi_fmtdev(&currdev); init_zfs_bootenv(devname); - return (sanity_check_currdev()); + rv = sanity_check_currdev(); + if (rv) { + buf = malloc(VDEV_PAD_SIZE); + if (buf != NULL) { + if (zfs_nextboot(&currdev, buf, VDEV_PAD_SIZE) == 0) { + printf("zfs nextboot: %s\n", buf); + set_currdev(buf); + } + free(buf); + } + } + return (rv); } #endif static bool try_as_currdev(pdinfo_t *hd, pdinfo_t *pp) { uint64_t guid; #ifdef EFI_ZFS_BOOT /* * If there's a zpool on this device, try it as a ZFS * filesystem, which has somewhat different setup than all * other types of fs due to imperfect loader integration. * This all stems from ZFS being both a device (zpool) and * a filesystem, plus the boot env feature. */ if (efizfs_get_guid_by_handle(pp->pd_handle, &guid)) return (probe_zfs_currdev(guid)); #endif /* * All other filesystems just need the pdinfo * initialized in the standard way. 
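 * ("Standard way" here means: point currdev at the partition with
 * set_currdev_pdinfo() and then let sanity_check_currdev() confirm that
 * the partition actually holds /boot/defaults/loader.conf, the optional
 * bootable-token file, or the default kernel before we accept it.)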
*/ set_currdev_pdinfo(pp); return (sanity_check_currdev()); } /* * Sometimes we get filenames that are all upper case * and/or have backslashes in them. Filter all this out * if it looks like we need to do so. */ static void fix_dosisms(char *p) { while (*p) { if (isupper(*p)) *p = tolower(*p); else if (*p == '\\') *p = '/'; p++; } } #define SIZE(dp, edp) (size_t)((intptr_t)(void *)edp - (intptr_t)(void *)dp) enum { BOOT_INFO_OK = 0, BAD_CHOICE = 1, NOT_SPECIFIC = 2 }; static int match_boot_info(char *boot_info, size_t bisz) { uint32_t attr; uint16_t fplen; size_t len; char *walker, *ep; EFI_DEVICE_PATH *dp, *edp, *first_dp, *last_dp; pdinfo_t *pp; CHAR16 *descr; char *kernel = NULL; FILEPATH_DEVICE_PATH *fp; struct stat st; CHAR16 *text; /* * FreeBSD encodes its boot loading path into the boot loader * BootXXXX variable. We look for the last one in the path * and use that to load the kernel. However, if we only find * one DEVICE_PATH, then there's nothing specific and we should * fall back. * * In an ideal world, we'd look at the image handle we were * passed, match up with the loader we are and then return the * next one in the path. This would be most flexible and cover * many chain booting scenarios where you need to use this * boot loader to get to the next boot loader. However, that * doesn't work. We rarely have the path to the image booted * (just the device) so we can't count on that. So, we do the * next best thing, we look through the device path(s) passed * in the BootXXXX variable. If there's only one, we return * NOT_SPECIFIC. Otherwise, we look at the last one and try to * load that. If we can, we return BOOT_INFO_OK. Otherwise we * return BAD_CHOICE for the caller to sort out. */ if (bisz < sizeof(attr) + sizeof(fplen) + sizeof(CHAR16)) return NOT_SPECIFIC; walker = boot_info; ep = walker + bisz; memcpy(&attr, walker, sizeof(attr)); walker += sizeof(attr); memcpy(&fplen, walker, sizeof(fplen)); walker += sizeof(fplen); descr = (CHAR16 *)(intptr_t)walker; len = ucs2len(descr); walker += (len + 1) * sizeof(CHAR16); last_dp = first_dp = dp = (EFI_DEVICE_PATH *)walker; edp = (EFI_DEVICE_PATH *)(walker + fplen); if ((char *)edp > ep) return NOT_SPECIFIC; while (dp < edp && SIZE(dp, edp) > sizeof(EFI_DEVICE_PATH)) { text = efi_devpath_name(dp); if (text != NULL) { printf(" BootInfo Path: %S\n", text); efi_free_devpath_name(text); } last_dp = dp; dp = (EFI_DEVICE_PATH *)((char *)dp + efi_devpath_length(dp)); } /* * If there's only one item in the list, then nothing was * specified. Or if the last path doesn't have a media * path in it. Those show up as various VenHw() nodes * which are basically opaque to us. Don't count those * as something specific. */ if (last_dp == first_dp) { printf("Ignoring Boot%04x: Only one DP found\n", boot_current); return NOT_SPECIFIC; } if (efi_devpath_to_media_path(last_dp) == NULL) { printf("Ignoring Boot%04x: No Media Path\n", boot_current); return NOT_SPECIFIC; } /* * OK. At this point we either have a good path or a bad one. * Let's check. */ pp = efiblk_get_pdinfo_by_device_path(last_dp); if (pp == NULL) { printf("Ignoring Boot%04x: Device Path not found\n", boot_current); return BAD_CHOICE; } set_currdev_pdinfo(pp); if (!sanity_check_currdev()) { printf("Ignoring Boot%04x: sanity check failed\n", boot_current); return BAD_CHOICE; } /* * OK. We've found a device that matches, next we need to check the last * component of the path. If it's a file, then we set the default kernel * to that. Otherwise, just use this as the default root.
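 *
 * A purely hypothetical illustration: a Boot0001 variable whose file
 * path list is
 *   HD(1,GPT,...)/\EFI\freebsd\loader.efi
 *   HD(2,GPT,...)/\boot\kernel\kernel
 * ends in a MEDIA_FILEPATH_DP node, so currdev is set to the partition
 * from the second path and "kernel" is set to /boot/kernel/kernel
 * (fix_dosisms() lowercases the name and flips the backslashes).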
* * Reminder: we're running very early, before we've parsed the defaults * file, so we may need to have a hack override. */ dp = efi_devpath_last_node(last_dp); if (DevicePathType(dp) != MEDIA_DEVICE_PATH || DevicePathSubType(dp) != MEDIA_FILEPATH_DP) { printf("Using Boot%04x for root partition\n", boot_current); return (BOOT_INFO_OK); /* use currdir, default kernel */ } fp = (FILEPATH_DEVICE_PATH *)dp; ucs2_to_utf8(fp->PathName, &kernel); if (kernel == NULL) { printf("Not using Boot%04x: can't decode kernel\n", boot_current); return (BAD_CHOICE); } if (*kernel == '\\' || isupper(*kernel)) fix_dosisms(kernel); if (stat(kernel, &st) != 0) { free(kernel); printf("Not using Boot%04x: can't find %s\n", boot_current, kernel); return (BAD_CHOICE); } setenv("kernel", kernel, 1); free(kernel); text = efi_devpath_name(last_dp); if (text) { printf("Using Boot%04x %S + %s\n", boot_current, text, kernel); efi_free_devpath_name(text); } return (BOOT_INFO_OK); } /* * Look at the passed-in boot_info, if any. If we find it then we need * to see if we can find ourselves in the boot chain. If we can, and * there's another specified thing to boot next, assume that the file * is loaded from / and use that for the root filesystem. If can't * find the specified thing, we must fail the boot. If we're last on * the list, then we fallback to looking for the first available / * candidate (ZFS, if there's a bootable zpool, otherwise a UFS * partition that has either /boot/defaults/loader.conf on it or * /boot/kernel/kernel (the default kernel) that we can use. * * We always fail if we can't find the right thing. However, as * a concession to buggy UEFI implementations, like u-boot, if * we have determined that the host is violating the UEFI boot * manager protocol, we'll signal the rest of the program that * a drop to the OK boot loader prompt is possible. */ static int find_currdev(bool do_bootmgr, bool is_last, char *boot_info, size_t boot_info_sz) { pdinfo_t *dp, *pp; EFI_DEVICE_PATH *devpath, *copy; EFI_HANDLE h; CHAR16 *text; struct devsw *dev; int unit; uint64_t extra; int rv; char *rootdev; /* * First choice: if rootdev is already set, use that, even if * it's wrong. */ rootdev = getenv("rootdev"); if (rootdev != NULL) { printf(" Setting currdev to configured rootdev %s\n", rootdev); set_currdev(rootdev); return (0); } /* * Second choice: If uefi_rootdev is set, translate that UEFI device * path to the loader's internal name and use that. */ do { rootdev = getenv("uefi_rootdev"); if (rootdev == NULL) break; devpath = efi_name_to_devpath(rootdev); if (devpath == NULL) break; dp = efiblk_get_pdinfo_by_device_path(devpath); efi_devpath_free(devpath); if (dp == NULL) break; printf(" Setting currdev to UEFI path %s\n", rootdev); set_currdev_pdinfo(dp); return (0); } while (0); /* * Third choice: If we can find out image boot_info, and there's * a follow-on boot image in that boot_info, use that. In this * case root will be the partition specified in that image and * we'll load the kernel specified by the file path. Should there * not be a filepath, we use the default. This filepath overrides * loader.conf. */ if (do_bootmgr) { rv = match_boot_info(boot_info, boot_info_sz); switch (rv) { case BOOT_INFO_OK: /* We found it */ return (0); case BAD_CHOICE: /* specified file not found -> error */ /* XXX do we want to have an escape hatch for last in boot order? */ return (ENOENT); } /* Nothing specified, try normal match */ } #ifdef EFI_ZFS_BOOT /* * Did efi_zfs_probe() detect the boot pool? 
If so, use the zpool * it found, if it's sane. ZFS is the only thing that looks for * disks and pools to boot. This may change in the future, however, * if we allow specifying which pool to boot from via UEFI variables * rather than the bootenv stuff that FreeBSD uses today. */ if (pool_guid != 0) { printf("Trying ZFS pool\n"); if (probe_zfs_currdev(pool_guid)) return (0); } #endif /* EFI_ZFS_BOOT */ /* * Try to find the block device by its handle based on the * image we're booting. If we can't find a sane partition, * search all the other partitions of the disk. We do not * search other disks because it's a violation of the UEFI * boot protocol to do so. We fail and let UEFI go on to * the next candidate. */ dp = efiblk_get_pdinfo_by_handle(boot_img->DeviceHandle); if (dp != NULL) { text = efi_devpath_name(dp->pd_devpath); if (text != NULL) { printf("Trying ESP: %S\n", text); efi_free_devpath_name(text); } set_currdev_pdinfo(dp); if (sanity_check_currdev()) return (0); if (dp->pd_parent != NULL) { pdinfo_t *espdp = dp; dp = dp->pd_parent; STAILQ_FOREACH(pp, &dp->pd_part, pd_link) { /* Already tried the ESP */ if (espdp == pp) continue; /* * Roll up the ZFS special case * for those partitions that have * zpools on them. */ text = efi_devpath_name(pp->pd_devpath); if (text != NULL) { printf("Trying: %S\n", text); efi_free_devpath_name(text); } if (try_as_currdev(dp, pp)) return (0); } } } /* * Try the device handle from our loaded image first. If that * fails, use the device path from the loaded image and see if * any of the nodes in that path match one of the enumerated * handles. Currently, this handle list is only for netboot. */ if (efi_handle_lookup(boot_img->DeviceHandle, &dev, &unit, &extra) == 0) { set_currdev_devsw(dev, unit); if (sanity_check_currdev()) return (0); } copy = NULL; devpath = efi_lookup_image_devpath(IH); while (devpath != NULL) { h = efi_devpath_handle(devpath); if (h == NULL) break; free(copy); copy = NULL; if (efi_handle_lookup(h, &dev, &unit, &extra) == 0) { set_currdev_devsw(dev, unit); if (sanity_check_currdev()) return (0); } devpath = efi_lookup_devpath(h); if (devpath != NULL) { copy = efi_devpath_trim(devpath); devpath = copy; } } free(copy); return (ENOENT); } static bool interactive_interrupt(const char *msg) { time_t now, then, last; last = 0; now = then = getsecs(); printf("%s\n", msg); if (fail_timeout == -2) /* Always break to OK */ return (true); if (fail_timeout == -1) /* Never break to OK */ return (false); do { if (last != now) { printf("press any key to interrupt reboot in %d seconds\r", fail_timeout - (int)(now - then)); last = now; } /* XXX no pause or timeout wait for char */ if (ischar()) return (true); now = getsecs(); } while (now - then < fail_timeout); return (false); } static int parse_args(int argc, CHAR16 *argv[]) { int i, j, howto; bool vargood; char var[128]; /* * Parse the args to set the console settings, etc * boot1.efi passes these in, if it can read /boot.config or /boot/config * or iPXE may be setup to pass these in. Or the optional argument in the * boot environment was used to pass these arguments in (in which case * neither /boot.config nor /boot/config are consulted). * * Loop through the args, and for each one that contains an '=' that is * not the first character, add it to the environment. This allows * loader and kernel env vars to be passed on the command line. Convert * args from UCS-2 to ASCII (16 to 8 bit) as they are copied (though this * method is flawed for non-ASCII characters). 
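 *
 * A hypothetical example: passing "-h" and "kernel=kernel.old" would OR
 * RB_SERIAL into the returned howto and create the loader environment
 * variable kernel=kernel.old; both effects come out of boot_parse_arg()
 * in the loop below.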
*/ howto = 0; for (i = 1; i < argc; i++) { cpy16to8(argv[i], var, sizeof(var)); howto |= boot_parse_arg(var); } return (howto); } static void setenv_int(const char *key, int val) { char buf[20]; snprintf(buf, sizeof(buf), "%d", val); setenv(key, buf, 1); } /* * Parse ConOut (the list of consoles active) and see if we can find a * serial port and/or a video port. It would be nice to also walk the * ACPI name space to map the UID for the serial port to a port. The * latter is especially hard. */ int parse_uefi_con_out(void) { int how, rv; int vid_seen = 0, com_seen = 0, seen = 0; size_t sz; char buf[4096], *ep; EFI_DEVICE_PATH *node; ACPI_HID_DEVICE_PATH *acpi; UART_DEVICE_PATH *uart; bool pci_pending; how = 0; sz = sizeof(buf); rv = efi_global_getenv("ConOut", buf, &sz); if (rv != EFI_SUCCESS) { /* If we don't have any ConOut default to serial */ how = RB_SERIAL; goto out; } ep = buf + sz; node = (EFI_DEVICE_PATH *)buf; while ((char *)node < ep) { pci_pending = false; if (DevicePathType(node) == ACPI_DEVICE_PATH && (DevicePathSubType(node) == ACPI_DP || DevicePathSubType(node) == ACPI_EXTENDED_DP)) { /* Check for Serial node */ acpi = (void *)node; if (EISA_ID_TO_NUM(acpi->HID) == 0x501) { setenv_int("efi_8250_uid", acpi->UID); com_seen = ++seen; } } else if (DevicePathType(node) == MESSAGING_DEVICE_PATH && DevicePathSubType(node) == MSG_UART_DP) { com_seen = ++seen; uart = (void *)node; setenv_int("efi_com_speed", uart->BaudRate); } else if (DevicePathType(node) == ACPI_DEVICE_PATH && DevicePathSubType(node) == ACPI_ADR_DP) { /* Check for AcpiAdr() Node for video */ vid_seen = ++seen; } else if (DevicePathType(node) == HARDWARE_DEVICE_PATH && DevicePathSubType(node) == HW_PCI_DP) { /* * Note, vmware fusion has a funky console device * PciRoot(0x0)/Pci(0xf,0x0) * which we can only detect at the end since we also * have to cope with: * PciRoot(0x0)/Pci(0x1f,0x0)/Serial(0x1) * so only match it if it's last. */ pci_pending = true; } node = NextDevicePathNode(node); } if (pci_pending && vid_seen == 0) vid_seen = ++seen; /* * Truth table for RB_MULTIPLE | RB_SERIAL * Value Result * 0 Use only video console * RB_SERIAL Use only serial console * RB_MULTIPLE Use both video and serial console * (but video is primary so gets rc messages) * both Use both video and serial console * (but serial is primary so gets rc messages) * * Try to honor this as best we can. If only one of serial / video * found, then use that. Otherwise, use the first one we found. * This also implies if we found nothing, default to video. 
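 *
 * Concretely (hypothetical firmware): a Uart() node listed before an
 * AcpiAdr() video node gives com_seen < vid_seen, so we return
 * RB_MULTIPLE | RB_SERIAL (serial primary); the video node first gives
 * plain RB_MULTIPLE (video primary); a Uart() node alone gives
 * RB_SERIAL.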
*/ how = 0; if (vid_seen && com_seen) { how |= RB_MULTIPLE; if (com_seen < vid_seen) how |= RB_SERIAL; } else if (com_seen) how |= RB_SERIAL; out: return (how); } void parse_loader_efi_config(EFI_HANDLE h, const char *env_fn) { pdinfo_t *dp; struct stat st; int fd = -1; char *env = NULL; dp = efiblk_get_pdinfo_by_handle(h); if (dp == NULL) return; set_currdev_pdinfo(dp); if (stat(env_fn, &st) != 0) return; fd = open(env_fn, O_RDONLY); if (fd == -1) return; env = malloc(st.st_size + 1); if (env == NULL) goto out; if (read(fd, env, st.st_size) != st.st_size) goto out; env[st.st_size] = '\0'; boot_parse_cmdline(env); out: free(env); close(fd); } static void read_loader_env(const char *name, char *def_fn, bool once) { UINTN len; char *fn, *freeme = NULL; len = 0; fn = def_fn; if (efi_freebsd_getenv(name, NULL, &len) == EFI_BUFFER_TOO_SMALL) { freeme = fn = malloc(len + 1); if (fn != NULL) { if (efi_freebsd_getenv(name, fn, &len) != EFI_SUCCESS) { free(fn); fn = NULL; printf( "Can't fetch FreeBSD::%s we know is there\n", name); } else { /* * if tagged as 'once' delete the env variable so we * only use it once. */ if (once) efi_freebsd_delenv(name); /* * We malloced 1 more than len above, then redid the call. * so now we have room at the end of the string to NUL terminate * it here, even if the typical idium would have '- 1' here to * not overflow. len should be the same on return both times. */ fn[len] = '\0'; } } else { printf( "Can't allocate %d bytes to fetch FreeBSD::%s env var\n", len, name); } } if (fn) { printf(" Reading loader env vars from %s\n", fn); parse_loader_efi_config(boot_img->DeviceHandle, fn); } } caddr_t ptov(uintptr_t x) { return ((caddr_t)x); } EFI_STATUS main(int argc, CHAR16 *argv[]) { EFI_GUID *guid; int howto, i, uhowto; UINTN k; bool has_kbd, is_last; char *s; EFI_DEVICE_PATH *imgpath; CHAR16 *text; EFI_STATUS rv; size_t sz, bosz = 0, bisz = 0; UINT16 boot_order[100]; char boot_info[4096]; char buf[32]; bool uefi_boot_mgr; archsw.arch_autoload = efi_autoload; archsw.arch_getdev = efi_getdev; archsw.arch_copyin = efi_copyin; archsw.arch_copyout = efi_copyout; #ifdef __amd64__ archsw.arch_hypervisor = x86_hypervisor; #endif archsw.arch_readin = efi_readin; archsw.arch_zfs_probe = efi_zfs_probe; /* Get our loaded image protocol interface structure. */ (void) OpenProtocolByHandle(IH, &imgid, (void **)&boot_img); /* * Chicken-and-egg problem; we want to have console output early, but * some console attributes may depend on reading from eg. the boot * device, which we can't do yet. We can use printf() etc. once this is * done. So, we set it to the efi console, then call console init. This * gets us printf early, but also primes the pump for all future console * changes to take effect, regardless of where they come from. */ setenv("console", "efi", 1); uhowto = parse_uefi_con_out(); #if defined(__aarch64__) || defined(__arm__) if ((uhowto & RB_SERIAL) != 0) setenv("console", "comconsole", 1); #endif cons_probe(); /* Init the time source */ efi_time_init(); /* * Initialise the block cache. Set the upper limit. */ bcache_init(32768, 512); /* * Scan the BLOCK IO MEDIA handles then * march through the device switch probing for things. */ i = efipart_inithandles(); if (i != 0 && i != ENOENT) { printf("efipart_inithandles failed with ERRNO %d, expect " "failures\n", i); } for (i = 0; devsw[i] != NULL; i++) if (devsw[i]->dv_init != NULL) (devsw[i]->dv_init)(); /* * Detect console settings two different ways: one via the command * args (eg -h) or via the UEFI ConOut variable. 
*/ has_kbd = has_keyboard(); howto = parse_args(argc, argv); if (!has_kbd && (howto & RB_PROBE)) howto |= RB_SERIAL | RB_MULTIPLE; howto &= ~RB_PROBE; /* * Read additional environment variables from the boot device's * "LoaderEnv" file. Any boot loader environment variable may be set * there, which are subtly different than loader.conf variables. Only * the 'simple' ones may be set so things like foo_load="YES" won't work * for two reasons. First, the parser is simplistic and doesn't grok * quotes. Second, because the variables that cause an action to happen * are parsed by the lua, 4th or whatever code that's not yet * loaded. This is relative to the root directory when loader.efi is * loaded off the UFS root drive (when chain booted), or from the ESP * when directly loaded by the BIOS. * * We also read in NextLoaderEnv if it was specified. This allows next boot * functionality to be implemented and to override anything in LoaderEnv. */ read_loader_env("LoaderEnv", "/efi/freebsd/loader.env", false); read_loader_env("NextLoaderEnv", NULL, true); /* * We now have two notions of console. howto should be viewed as * overrides. If console is already set, don't set it again. */ #define VIDEO_ONLY 0 #define SERIAL_ONLY RB_SERIAL #define VID_SER_BOTH RB_MULTIPLE #define SER_VID_BOTH (RB_SERIAL | RB_MULTIPLE) #define CON_MASK (RB_SERIAL | RB_MULTIPLE) if (strcmp(getenv("console"), "efi") == 0) { if ((howto & CON_MASK) == 0) { /* No override, uhowto is controlling and efi cons is perfect */ howto = howto | (uhowto & CON_MASK); } else if ((howto & CON_MASK) == (uhowto & CON_MASK)) { /* override matches what UEFI told us, efi console is perfect */ } else if ((uhowto & (CON_MASK)) != 0) { /* * We detected a serial console on ConOut. All possible * overrides include serial. We can't really override what efi * gives us, so we use it knowing it's the best choice. */ /* Do nothing */ } else { /* * We detected some kind of serial in the override, but ConOut * has no serial, so we have to sort out which case it really is. */ switch (howto & CON_MASK) { case SERIAL_ONLY: setenv("console", "comconsole", 1); break; case VID_SER_BOTH: setenv("console", "efi comconsole", 1); break; case SER_VID_BOTH: setenv("console", "comconsole efi", 1); break; /* case VIDEO_ONLY can't happen -- it's the first if above */ } } } /* * howto is set now how we want to export the flags to the kernel, so * set the env based on it. */ boot_howto_to_env(howto); if (efi_copy_init()) { printf("failed to allocate staging area\n"); return (EFI_BUFFER_TOO_SMALL); } if ((s = getenv("fail_timeout")) != NULL) fail_timeout = strtol(s, NULL, 10); printf("%s\n", bootprog_info); printf(" Command line arguments:"); for (i = 0; i < argc; i++) printf(" %S", argv[i]); printf("\n"); printf(" Image base: 0x%lx\n", (unsigned long)boot_img->ImageBase); printf(" EFI version: %d.%02d\n", ST->Hdr.Revision >> 16, ST->Hdr.Revision & 0xffff); printf(" EFI Firmware: %S (rev %d.%02d)\n", ST->FirmwareVendor, ST->FirmwareRevision >> 16, ST->FirmwareRevision & 0xffff); printf(" Console: %s (%#x)\n", getenv("console"), howto); /* Determine the devpath of our image so we can prefer it. 
*/ text = efi_devpath_name(boot_img->FilePath); if (text != NULL) { printf(" Load Path: %S\n", text); efi_setenv_freebsd_wcs("LoaderPath", text); efi_free_devpath_name(text); } rv = OpenProtocolByHandle(boot_img->DeviceHandle, &devid, (void **)&imgpath); if (rv == EFI_SUCCESS) { text = efi_devpath_name(imgpath); if (text != NULL) { printf(" Load Device: %S\n", text); efi_setenv_freebsd_wcs("LoaderDev", text); efi_free_devpath_name(text); } } if (getenv("uefi_ignore_boot_mgr") != NULL) { printf(" Ignoring UEFI boot manager\n"); uefi_boot_mgr = false; } else { uefi_boot_mgr = true; boot_current = 0; sz = sizeof(boot_current); rv = efi_global_getenv("BootCurrent", &boot_current, &sz); if (rv == EFI_SUCCESS) printf(" BootCurrent: %04x\n", boot_current); else { boot_current = 0xffff; uefi_boot_mgr = false; } sz = sizeof(boot_order); rv = efi_global_getenv("BootOrder", &boot_order, &sz); if (rv == EFI_SUCCESS) { printf(" BootOrder:"); for (i = 0; i < sz / sizeof(boot_order[0]); i++) printf(" %04x%s", boot_order[i], boot_order[i] == boot_current ? "[*]" : ""); printf("\n"); is_last = boot_order[(sz / sizeof(boot_order[0])) - 1] == boot_current; bosz = sz; } else if (uefi_boot_mgr) { /* * u-boot doesn't set BootOrder, but otherwise participates in the * boot manager protocol. So we fake it here and don't consider it * a failure. */ bosz = sizeof(boot_order[0]); boot_order[0] = boot_current; is_last = true; } } /* * Next, find the boot info structure the UEFI boot manager is * supposed to setup. We need this so we can walk through it to * find where we are in the booting process and what to try to * boot next. */ if (uefi_boot_mgr) { snprintf(buf, sizeof(buf), "Boot%04X", boot_current); sz = sizeof(boot_info); rv = efi_global_getenv(buf, &boot_info, &sz); if (rv == EFI_SUCCESS) bisz = sz; else uefi_boot_mgr = false; } /* * Disable the watchdog timer. By default the boot manager sets * the timer to 5 minutes before invoking a boot option. If we * want to return to the boot manager, we have to disable the * watchdog timer and since we're an interactive program, we don't * want to wait until the user types "quit". The timer may have * fired by then. We don't care if this fails. It does not prevent * normal functioning in any way... */ BS->SetWatchdogTimer(0, 0, 0, NULL); /* * Initialize the trusted/forbidden certificates from UEFI. * They will be later used to verify the manifest(s), * which should contain hashes of verified files. * This needs to be initialized before any configuration files * are loaded. */ #ifdef EFI_SECUREBOOT ve_efi_init(); #endif /* * Try and find a good currdev based on the image that was booted. * It might be desirable here to have a short pause to allow falling * through to the boot loader instead of returning instantly to follow * the boot protocol and also allow an escape hatch for users wishing * to try something different. 
*/ if (find_currdev(uefi_boot_mgr, is_last, boot_info, bisz) != 0) if (uefi_boot_mgr && !interactive_interrupt("Failed to find bootable partition")) return (EFI_NOT_FOUND); efi_init_environment(); #if !defined(__arm__) for (k = 0; k < ST->NumberOfTableEntries; k++) { guid = &ST->ConfigurationTable[k].VendorGuid; if (!memcmp(guid, &smbios, sizeof(EFI_GUID))) { char buf[40]; snprintf(buf, sizeof(buf), "%p", ST->ConfigurationTable[k].VendorTable); setenv("hint.smbios.0.mem", buf, 1); smbios_detect(ST->ConfigurationTable[k].VendorTable); break; } } #endif interact(); /* doesn't return */ return (EFI_SUCCESS); /* keep compiler happy */ } COMMAND_SET(poweroff, "poweroff", "power off the system", command_poweroff); static int command_poweroff(int argc __unused, char *argv[] __unused) { int i; for (i = 0; devsw[i] != NULL; ++i) if (devsw[i]->dv_cleanup != NULL) (devsw[i]->dv_cleanup)(); RS->ResetSystem(EfiResetShutdown, EFI_SUCCESS, 0, NULL); /* NOTREACHED */ return (CMD_ERROR); } COMMAND_SET(reboot, "reboot", "reboot the system", command_reboot); static int command_reboot(int argc, char *argv[]) { int i; for (i = 0; devsw[i] != NULL; ++i) if (devsw[i]->dv_cleanup != NULL) (devsw[i]->dv_cleanup)(); RS->ResetSystem(EfiResetCold, EFI_SUCCESS, 0, NULL); /* NOTREACHED */ return (CMD_ERROR); } COMMAND_SET(quit, "quit", "exit the loader", command_quit); static int command_quit(int argc, char *argv[]) { exit(0); return (CMD_OK); } COMMAND_SET(memmap, "memmap", "print memory map", command_memmap); static int command_memmap(int argc __unused, char *argv[] __unused) { UINTN sz; EFI_MEMORY_DESCRIPTOR *map, *p; UINTN key, dsz; UINT32 dver; EFI_STATUS status; int i, ndesc; char line[80]; sz = 0; status = BS->GetMemoryMap(&sz, 0, &key, &dsz, &dver); if (status != EFI_BUFFER_TOO_SMALL) { printf("Can't determine memory map size\n"); return (CMD_ERROR); } map = malloc(sz); status = BS->GetMemoryMap(&sz, map, &key, &dsz, &dver); if (EFI_ERROR(status)) { printf("Can't read memory map\n"); return (CMD_ERROR); } ndesc = sz / dsz; snprintf(line, sizeof(line), "%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); pager_open(); if (pager_output(line)) { pager_close(); return (CMD_OK); } for (i = 0, p = map; i < ndesc; i++, p = NextMemoryDescriptor(p, dsz)) { snprintf(line, sizeof(line), "%23s %012jx %012jx %08jx ", efi_memory_type(p->Type), (uintmax_t)p->PhysicalStart, (uintmax_t)p->VirtualStart, (uintmax_t)p->NumberOfPages); if (pager_output(line)) break; if (p->Attribute & EFI_MEMORY_UC) printf("UC "); if (p->Attribute & EFI_MEMORY_WC) printf("WC "); if (p->Attribute & EFI_MEMORY_WT) printf("WT "); if (p->Attribute & EFI_MEMORY_WB) printf("WB "); if (p->Attribute & EFI_MEMORY_UCE) printf("UCE "); if (p->Attribute & EFI_MEMORY_WP) printf("WP "); if (p->Attribute & EFI_MEMORY_RP) printf("RP "); if (p->Attribute & EFI_MEMORY_XP) printf("XP "); if (p->Attribute & EFI_MEMORY_NV) printf("NV "); if (p->Attribute & EFI_MEMORY_MORE_RELIABLE) printf("MR "); if (p->Attribute & EFI_MEMORY_RO) printf("RO "); if (pager_output("\n")) break; } pager_close(); return (CMD_OK); } COMMAND_SET(configuration, "configuration", "print configuration tables", command_configuration); static int command_configuration(int argc, char *argv[]) { UINTN i; char *name; printf("NumberOfTableEntries=%lu\n", (unsigned long)ST->NumberOfTableEntries); for (i = 0; i < ST->NumberOfTableEntries; i++) { EFI_GUID *guid; printf(" "); guid = &ST->ConfigurationTable[i].VendorGuid; if (efi_guid_to_name(guid, &name) == true) { printf(name); 
free(name); } else { printf("Error while translating UUID to name"); } printf(" at %p\n", ST->ConfigurationTable[i].VendorTable); } return (CMD_OK); } COMMAND_SET(mode, "mode", "change or display EFI text modes", command_mode); static int command_mode(int argc, char *argv[]) { UINTN cols, rows; unsigned int mode; int i; char *cp; EFI_STATUS status; SIMPLE_TEXT_OUTPUT_INTERFACE *conout; conout = ST->ConOut; if (argc > 1) { mode = strtol(argv[1], &cp, 0); if (cp[0] != '\0') { printf("Invalid mode\n"); return (CMD_ERROR); } status = conout->QueryMode(conout, mode, &cols, &rows); if (EFI_ERROR(status)) { printf("invalid mode %d\n", mode); return (CMD_ERROR); } status = conout->SetMode(conout, mode); if (EFI_ERROR(status)) { printf("couldn't set mode %d\n", mode); return (CMD_ERROR); } (void) efi_cons_update_mode(); return (CMD_OK); } printf("Current mode: %d\n", conout->Mode->Mode); for (i = 0; i <= conout->Mode->MaxMode; i++) { status = conout->QueryMode(conout, i, &cols, &rows); if (EFI_ERROR(status)) continue; printf("Mode %d: %u columns, %u rows\n", i, (unsigned)cols, (unsigned)rows); } if (i != 0) printf("Select a mode with the command \"mode \"\n"); return (CMD_OK); } COMMAND_SET(lsefi, "lsefi", "list EFI handles", command_lsefi); static int command_lsefi(int argc __unused, char *argv[] __unused) { char *name; EFI_HANDLE *buffer = NULL; EFI_HANDLE handle; UINTN bufsz = 0, i, j; EFI_STATUS status; int ret = 0; status = BS->LocateHandle(AllHandles, NULL, NULL, &bufsz, buffer); if (status != EFI_BUFFER_TOO_SMALL) { snprintf(command_errbuf, sizeof (command_errbuf), "unexpected error: %lld", (long long)status); return (CMD_ERROR); } if ((buffer = malloc(bufsz)) == NULL) { sprintf(command_errbuf, "out of memory"); return (CMD_ERROR); } status = BS->LocateHandle(AllHandles, NULL, NULL, &bufsz, buffer); if (EFI_ERROR(status)) { free(buffer); snprintf(command_errbuf, sizeof (command_errbuf), "LocateHandle() error: %lld", (long long)status); return (CMD_ERROR); } pager_open(); for (i = 0; i < (bufsz / sizeof (EFI_HANDLE)); i++) { UINTN nproto = 0; EFI_GUID **protocols = NULL; handle = buffer[i]; printf("Handle %p", handle); if (pager_output("\n")) break; /* device path */ status = BS->ProtocolsPerHandle(handle, &protocols, &nproto); if (EFI_ERROR(status)) { snprintf(command_errbuf, sizeof (command_errbuf), "ProtocolsPerHandle() error: %lld", (long long)status); continue; } for (j = 0; j < nproto; j++) { if (efi_guid_to_name(protocols[j], &name) == true) { printf(" %s", name); free(name); } else { printf("Error while translating UUID to name"); } if ((ret = pager_output("\n")) != 0) break; } BS->FreePool(protocols); if (ret != 0) break; } pager_close(); free(buffer); return (CMD_OK); } #ifdef LOADER_FDT_SUPPORT extern int command_fdt_internal(int argc, char *argv[]); /* * Since proper fdt command handling function is defined in fdt_loader_cmd.c, * and declaring it as extern is in contradiction with COMMAND_SET() macro * (which uses static pointer), we're defining wrapper function, which * calls the proper fdt handling routine. */ static int command_fdt(int argc, char *argv[]) { return (command_fdt_internal(argc, argv)); } COMMAND_SET(fdt, "fdt", "flattened device tree handling", command_fdt); #endif /* * Chain load another efi loader. 
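 *
 * Hypothetical usage from the loader prompt:
 *
 *   chain disk0p1:/efi/microsoft/boot/bootmgfw.efi
 *
 * The named image is read into a boot-services buffer, handed to
 * LoadImage(), given a DeviceHandle matching the device part of the
 * path (zfs, net or disk), and started; any extra words on the command
 * line are passed along as LoadOptions.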
*/ static int command_chain(int argc, char *argv[]) { EFI_GUID LoadedImageGUID = LOADED_IMAGE_PROTOCOL; EFI_HANDLE loaderhandle; EFI_LOADED_IMAGE *loaded_image; EFI_STATUS status; struct stat st; struct devdesc *dev; char *name, *path; void *buf; int fd; if (argc < 2) { command_errmsg = "wrong number of arguments"; return (CMD_ERROR); } name = argv[1]; if ((fd = open(name, O_RDONLY)) < 0) { command_errmsg = "no such file"; return (CMD_ERROR); } #ifdef LOADER_VERIEXEC if (verify_file(fd, name, 0, VE_MUST, __func__) < 0) { sprintf(command_errbuf, "can't verify: %s", name); close(fd); return (CMD_ERROR); } #endif if (fstat(fd, &st) < -1) { command_errmsg = "stat failed"; close(fd); return (CMD_ERROR); } status = BS->AllocatePool(EfiLoaderCode, (UINTN)st.st_size, &buf); if (status != EFI_SUCCESS) { command_errmsg = "failed to allocate buffer"; close(fd); return (CMD_ERROR); } if (read(fd, buf, st.st_size) != st.st_size) { command_errmsg = "error while reading the file"; (void)BS->FreePool(buf); close(fd); return (CMD_ERROR); } close(fd); status = BS->LoadImage(FALSE, IH, NULL, buf, st.st_size, &loaderhandle); (void)BS->FreePool(buf); if (status != EFI_SUCCESS) { command_errmsg = "LoadImage failed"; return (CMD_ERROR); } status = OpenProtocolByHandle(loaderhandle, &LoadedImageGUID, (void **)&loaded_image); if (argc > 2) { int i, len = 0; CHAR16 *argp; for (i = 2; i < argc; i++) len += strlen(argv[i]) + 1; len *= sizeof (*argp); loaded_image->LoadOptions = argp = malloc (len); loaded_image->LoadOptionsSize = len; for (i = 2; i < argc; i++) { char *ptr = argv[i]; while (*ptr) *(argp++) = *(ptr++); *(argp++) = ' '; } *(--argv) = 0; } if (efi_getdev((void **)&dev, name, (const char **)&path) == 0) { #ifdef EFI_ZFS_BOOT struct zfs_devdesc *z_dev; #endif struct disk_devdesc *d_dev; pdinfo_t *hd, *pd; switch (dev->d_dev->dv_type) { #ifdef EFI_ZFS_BOOT case DEVT_ZFS: z_dev = (struct zfs_devdesc *)dev; loaded_image->DeviceHandle = efizfs_get_handle_by_guid(z_dev->pool_guid); break; #endif case DEVT_NET: loaded_image->DeviceHandle = efi_find_handle(dev->d_dev, dev->d_unit); break; default: hd = efiblk_get_pdinfo(dev); if (STAILQ_EMPTY(&hd->pd_part)) { loaded_image->DeviceHandle = hd->pd_handle; break; } d_dev = (struct disk_devdesc *)dev; STAILQ_FOREACH(pd, &hd->pd_part, pd_link) { /* * d_partition should be 255 */ if (pd->pd_unit == (uint32_t)d_dev->d_slice) { loaded_image->DeviceHandle = pd->pd_handle; break; } } break; } } dev_cleanup(); status = BS->StartImage(loaderhandle, NULL, NULL); if (status != EFI_SUCCESS) { command_errmsg = "StartImage failed"; free(loaded_image->LoadOptions); loaded_image->LoadOptions = NULL; status = BS->UnloadImage(loaded_image); return (CMD_ERROR); } return (CMD_ERROR); /* not reached */ } COMMAND_SET(chain, "chain", "chain load file", command_chain); Index: head/stand/i386/gptzfsboot/Makefile =================================================================== --- head/stand/i386/gptzfsboot/Makefile (revision 362430) +++ head/stand/i386/gptzfsboot/Makefile (revision 362431) @@ -1,75 +1,82 @@ # $FreeBSD$ .include .PATH: ${BOOTSRC}/i386/boot2 ${BOOTSRC}/i386/gptboot \ ${BOOTSRC}/i386/zfsboot ${BOOTSRC}/i386/common \ - ${SASRC} + ${BOOTSRC}/common FILES= gptzfsboot MAN= gptzfsboot.8 BOOT_COMCONSOLE_PORT?= 0x3f8 BOOT_COMCONSOLE_SPEED?= 9600 B2SIOFMT?= 0x3 REL1= 0x700 ORG1= 0x7c00 ORG2= 0x0 CFLAGS+=-DBOOTPROG=\"gptzfsboot\" \ -O1 \ - -DGPT -DZFS -DBOOT2 \ + -DBOOT2 \ + -DLOADER_GPT_SUPPORT \ + -DLOADER_MBR_SUPPORT \ + -DLOADER_ZFS_SUPPORT \ -DSIOPRT=${BOOT_COMCONSOLE_PORT} \ 
-DSIOFMT=${B2SIOFMT} \ -DSIOSPD=${BOOT_COMCONSOLE_SPEED} \ -I${LDRSRC} \ -I${BOOTSRC}/i386/common \ + -I${BOOTSRC}/i386/libi386 \ -I${ZFSSRC} \ -I${SYSDIR}/crypto/skein \ -I${SYSDIR}/cddl/boot/zfs \ -I${SYSDIR}/cddl/contrib/opensolaris/uts/common \ -I${SYSDIR}/cddl/contrib/opensolaris/common/lz4 \ -I${BOOTSRC}/i386/btx/lib \ -I${BOOTSRC}/i386/boot2 \ -DHAVE_MEMCPY -I${SRCTOP}/sys/contrib/zlib \ -Wall -Waggregate-return -Wbad-function-cast \ -Wmissing-declarations -Wmissing-prototypes -Wnested-externs \ -Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings \ -Wno-pointer-sign CFLAGS.clang+= -Wno-tentative-definition-incomplete-type NO_WCAST_ALIGN= CFLAGS.gcc+= --param max-inline-insns-single=100 LD_FLAGS+=${LD_FLAGS_BIN} CLEANFILES+= gptzfsboot gptzfsboot: gptldr.bin gptzfsboot.bin ${BTXKERN} btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l gptldr.bin \ -o ${.TARGET} gptzfsboot.bin CLEANFILES+= gptldr.bin gptldr.out gptldr.o gptldr.bin: gptldr.out ${OBJCOPY} -S -O binary gptldr.out ${.TARGET} gptldr.out: gptldr.o ${LD} ${LD_FLAGS} -e start --defsym ORG=${ORG1} -T ${LDSCRIPT} -o ${.TARGET} gptldr.o -CLEANFILES+= gptzfsboot.bin gptzfsboot.out zfsboot.o sio.o cons.o \ - drv.o gpt.o ${OPENCRYPTO_XTS} +OBJS= zfsboot.o sio.o cons.o bcache.o devopen.o disk.o part.o zfs_cmd.o +CLEANFILES+= gptzfsboot.bin gptzfsboot.out ${OBJS} ${OPENCRYPTO_XTS} +# i386 standalone support library +LIBI386= ${BOOTOBJ}/i386/libi386/libi386.a + gptzfsboot.bin: gptzfsboot.out ${OBJCOPY} -S -O binary gptzfsboot.out ${.TARGET} -gptzfsboot.out: ${BTXCRT} zfsboot.o sio.o gpt.o drv.o cons.o \ +gptzfsboot.out: ${BTXCRT} ${OBJS} \ ${OPENCRYPTO_XTS} - ${LD} ${LD_FLAGS} --defsym ORG=${ORG2} -T ${LDSCRIPT} -o ${.TARGET} ${.ALLSRC} ${LIBSA32} + ${LD} ${LD_FLAGS} --defsym ORG=${ORG2} -T ${LDSCRIPT} -o ${.TARGET} ${.ALLSRC} ${LIBI386} ${LIBSA32} zfsboot.o: ${ZFSSRC}/zfsimpl.c .include Index: head/stand/i386/libi386/Makefile =================================================================== --- head/stand/i386/libi386/Makefile (revision 362430) +++ head/stand/i386/libi386/Makefile (revision 362431) @@ -1,45 +1,47 @@ # $FreeBSD$ .include LIB= i386 SRCS= bio.c biosacpi.c biosdisk.c biosmem.c biospnp.c \ biospci.c biossmap.c bootinfo.c bootinfo32.c bootinfo64.c \ comconsole.c devicename.c elf32_freebsd.c \ elf64_freebsd.c multiboot.c multiboot_tramp.S relocater_tramp.S \ i386_copy.c i386_module.c nullconsole.c pxe.c pxetramp.S \ time.c vidconsole.c amd64_tramp.S spinconsole.c .PATH: ${ZFSSRC} SRCS+= devicename_stubs.c CFLAGS+= -I${ZFSSRC} .PATH: ${SYSDIR}/teken SRCS+= teken.c BOOT_COMCONSOLE_PORT?= 0x3f8 CFLAGS+= -DCOMPORT=${BOOT_COMCONSOLE_PORT} BOOT_COMCONSOLE_SPEED?= 9600 CFLAGS+= -DCOMSPEED=${BOOT_COMCONSOLE_SPEED} .ifdef(BOOT_BIOSDISK_DEBUG) # Make the disk code more talkative CFLAGS+= -DDISK_DEBUG .endif # terminal emulation CFLAGS.vidconsole.c+= -I${SRCTOP}/sys/teken CFLAGS.teken.c+= -I${SRCTOP}/sys/teken # XXX: make alloca() useable CFLAGS+= -Dalloca=__builtin_alloca CFLAGS+= -I${BOOTSRC}/ficl -I${BOOTSRC}/ficl/i386 \ -I${LDRSRC} -I${BOOTSRC}/i386/common \ + -I${SYSDIR}/cddl/boot/zfs \ + -I${SYSDIR}/cddl/contrib/opensolaris/uts/common \ -I${SYSDIR}/contrib/dev/acpica/include # Handle FreeBSD specific %b and %D printf format specifiers CFLAGS+= ${FORMAT_EXTENSIONS} .include Index: head/stand/i386/zfsboot/Makefile =================================================================== --- head/stand/i386/zfsboot/Makefile (revision 362430) +++ head/stand/i386/zfsboot/Makefile (revision 362431) @@ -1,82 +1,92 @@ # $FreeBSD$ .include 
-.PATH: ${BOOTSRC}/i386/boot2 ${BOOTSRC}/i386/common ${SASRC} +.PATH: ${BOOTSRC}/i386/boot2 ${BOOTSRC}/i386/common ${BOOTSRC}/common FILES= zfsboot MAN= zfsboot.8 BOOT_COMCONSOLE_PORT?= 0x3f8 BOOT_COMCONSOLE_SPEED?= 9600 B2SIOFMT?= 0x3 REL1= 0x700 ORG1= 0x7c00 ORG2= 0x2000 CFLAGS+=-DBOOTPROG=\"zfsboot\" \ -O1 \ - -DZFS -DBOOT2 \ + -DBOOT2 \ + -DLOADER_GPT_SUPPORT \ + -DLOADER_MBR_SUPPORT \ + -DLOADER_ZFS_SUPPORT \ + -DLOADER_UFS_SUPPORT \ -DSIOPRT=${BOOT_COMCONSOLE_PORT} \ -DSIOFMT=${B2SIOFMT} \ -DSIOSPD=${BOOT_COMCONSOLE_SPEED} \ -I${LDRSRC} \ -I${BOOTSRC}/i386/common \ - -I${BOOTSRC}/i386 \ + -I${BOOTSRC}/i386/libi386 \ -I${ZFSSRC} \ -I${SYSDIR}/crypto/skein \ -I${SYSDIR}/cddl/boot/zfs \ -I${SYSDIR}/cddl/contrib/opensolaris/uts/common \ -I${SYSDIR}/cddl/contrib/opensolaris/common/lz4 \ -I${BOOTSRC}/i386/boot2 \ -Wall -Waggregate-return -Wbad-function-cast -Wno-cast-align \ -Wmissing-declarations -Wmissing-prototypes -Wnested-externs \ -Wpointer-arith -Wshadow -Wstrict-prototypes -Wwrite-strings +CFLAGS.part.c+= -DHAVE_MEMCPY -I${SRCTOP}/sys/contrib/zlib + CFLAGS.gcc+= --param max-inline-insns-single=100 LD_FLAGS+=${LD_FLAGS_BIN} CLEANFILES+= zfsboot zfsboot: zfsboot1 zfsboot2 cat zfsboot1 zfsboot2 > zfsboot CLEANFILES+= zfsboot1 zfsldr.out zfsldr.o zfsboot1: zfsldr.out ${OBJCOPY} -S -O binary zfsldr.out ${.TARGET} zfsldr.out: zfsldr.o ${LD} ${LD_FLAGS} -e start --defsym ORG=${ORG1} -T ${LDSCRIPT} -o ${.TARGET} zfsldr.o +OBJS= zfsboot.o sio.o cons.o bcache.o devopen.o disk.o part.o zfs_cmd.o CLEANFILES+= zfsboot2 zfsboot.ld zfsboot.ldr zfsboot.bin zfsboot.out \ - zfsboot.o zfsboot.s zfsboot.s.tmp sio.o cons.o drv.o + ${OBJS} # We currently allow 256k bytes for zfsboot - in practice it could be # any size up to 3.5Mb but keeping it fixed size simplifies zfsldr. # BOOT2SIZE= 262144 +# i386 standalone support library +LIBI386= ${BOOTOBJ}/i386/libi386/libi386.a + zfsboot2: zfsboot.ld @set -- `ls -l ${.ALLSRC}`; x=$$((${BOOT2SIZE}-$$5)); \ echo "$$x bytes available"; test $$x -ge 0 ${DD} if=${.ALLSRC} of=${.TARGET} bs=${BOOT2SIZE} conv=sync zfsboot.ld: zfsboot.ldr zfsboot.bin ${BTXKERN} btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l zfsboot.ldr \ -o ${.TARGET} -P 1 zfsboot.bin zfsboot.ldr: cp /dev/null ${.TARGET} zfsboot.bin: zfsboot.out ${OBJCOPY} -S -O binary zfsboot.out ${.TARGET} -zfsboot.out: ${BTXCRT} zfsboot.o sio.o drv.o cons.o - ${LD} ${LD_FLAGS} --defsym ORG=${ORG2} -T ${LDSCRIPT} -o ${.TARGET} ${.ALLSRC} ${LIBSA32} +zfsboot.out: ${BTXCRT} ${OBJS} + ${LD} ${LD_FLAGS} --defsym ORG=${ORG2} -T ${LDSCRIPT} -o ${.TARGET} ${.ALLSRC} ${LIBI386} ${LIBSA32} SRCS= zfsboot.c .include Index: head/stand/i386/zfsboot/zfsboot.c =================================================================== --- head/stand/i386/zfsboot/zfsboot.c (revision 362430) +++ head/stand/i386/zfsboot/zfsboot.c (revision 362431) @@ -1,1219 +1,697 @@ /*- * Copyright (c) 1998 Robert Nordier * All rights reserved. * * Redistribution and use in source and binary forms are freely * permitted provided that the above copyright notice and this * paragraph and the following disclaimer are duplicated in all * such forms. * * This software is provided "AS IS" and without any express or * implied warranties, including, without limitation, the implied * warranties of merchantability and fitness for a particular * purpose. 
*/ #include __FBSDID("$FreeBSD$"); -#include "stand.h" +#include #include #include #include #ifdef GPT #include #endif #include #include #include #include #include #include #include #include - +#include "bootstrap.h" +#include "libi386.h" #include #include "lib.h" #include "rbx.h" -#include "drv.h" -#include "edd.h" #include "cons.h" #include "bootargs.h" +#include "disk.h" +#include "part.h" #include "paths.h" #include "libzfs.h" #define ARGS 0x900 #define NOPT 14 #define NDEV 3 #define BIOS_NUMDRIVES 0x475 #define DRV_HARD 0x80 #define DRV_MASK 0x7f #define TYPE_AD 0 #define TYPE_DA 1 #define TYPE_MAXHARD TYPE_DA #define TYPE_FD 2 -#define DEV_GELIBOOT_BSIZE 4096 - extern uint32_t _end; -#ifdef GPT -static const uuid_t freebsd_zfs_uuid = GPT_ENT_TYPE_FREEBSD_ZFS; -#endif static const char optstr[NOPT] = "DhaCcdgmnpqrsv"; /* Also 'P', 'S' */ static const unsigned char flags[NOPT] = { RBX_DUAL, RBX_SERIAL, RBX_ASKNAME, RBX_CDROM, RBX_CONFIG, RBX_KDB, RBX_GDB, RBX_MUTE, RBX_NOINTR, RBX_PAUSE, RBX_QUIET, RBX_DFLTROOT, RBX_SINGLE, RBX_VERBOSE }; uint32_t opts; /* * Paths to try loading before falling back to the boot2 prompt. * * /boot/zfsloader must be tried before /boot/loader in order to remain * backward compatible with ZFS boot environments where /boot/loader exists * but does not have ZFS support, which was the case before FreeBSD 12. * * If no loader is found, try to load a kernel directly instead. */ static const struct string { const char *p; size_t len; } loadpath[] = { { PATH_LOADER_ZFS, sizeof(PATH_LOADER_ZFS) }, { PATH_LOADER, sizeof(PATH_LOADER) }, { PATH_KERNEL, sizeof(PATH_KERNEL) }, }; static const unsigned char dev_maj[NDEV] = {30, 4, 2}; +static struct i386_devdesc *bdev; static char cmd[512]; static char cmddup[512]; static char kname[1024]; -static char rootname[256]; static int comspeed = SIOSPD; static struct bootinfo bootinfo; static uint32_t bootdev; static struct zfs_boot_args zfsargs; +#ifdef LOADER_GELI_SUPPORT +static struct geli_boot_args geliargs; +#endif -vm_offset_t high_heap_base; -uint32_t bios_basemem, bios_extmem, high_heap_size; +extern vm_offset_t high_heap_base; +extern uint32_t bios_basemem, bios_extmem, high_heap_size; -static struct bios_smap smap; +static char *heap_top; +static char *heap_bottom; -/* - * The minimum amount of memory to reserve in bios_extmem for the heap. - */ -#define HEAP_MIN (64 * 1024 * 1024) - -static char *heap_next; -static char *heap_end; - -/* Buffers that must not span a 64k boundary. */ -#define READ_BUF_SIZE 8192 -struct dmadat { - char rdbuf[READ_BUF_SIZE]; /* for reading large things */ - char secbuf[READ_BUF_SIZE]; /* for MBR/disklabel */ -}; -static struct dmadat *dmadat; - void exit(int); -void reboot(void); +static void i386_zfs_probe(void); static void load(void); static int parse_cmd(void); -static void bios_getmem(void); -int main(void); #ifdef LOADER_GELI_SUPPORT #include "geliboot.h" static char gelipw[GELI_PW_MAXLEN]; #endif -struct zfsdsk { - struct dsk dsk; -#ifdef LOADER_GELI_SUPPORT - struct geli_dev *gdev; +struct arch_switch archsw; /* MI/MD interface boundary */ +static char boot_devname[2 * ZFS_MAXNAMELEN + 8]; /* disk or pool:dataset */ + +struct devsw *devsw[] = { + &bioshd, +#if defined(LOADER_ZFS_SUPPORT) + &zfs_dev, #endif + NULL }; -#include "zfsimpl.c" - -/* - * Read from a dnode (which must be from a ZPL filesystem). 
- */ -static int -zfs_read(spa_t *spa, const dnode_phys_t *dnode, off_t *offp, void *start, - size_t size) -{ - const znode_phys_t *zp = (const znode_phys_t *) dnode->dn_bonus; - size_t n; - int rc; - - n = size; - if (*offp + n > zp->zp_size) - n = zp->zp_size - *offp; - - rc = dnode_read(spa, dnode, *offp, start, n); - if (rc) - return (-1); - *offp += n; - - return (n); -} - -/* - * Current ZFS pool - */ -static spa_t *spa; -static spa_t *primary_spa; -static vdev_t *primary_vdev; - -/* - * A wrapper for dskread that doesn't have to worry about whether the - * buffer pointer crosses a 64k boundary. - */ -static int -vdev_read(void *xvdev, void *priv, off_t off, void *buf, size_t bytes) -{ - char *p; - daddr_t lba, alignlba; - off_t diff; - unsigned int nb, alignnb; - struct zfsdsk *zdsk = priv; - - if ((off & (DEV_BSIZE - 1)) || (bytes & (DEV_BSIZE - 1))) - return (-1); - - p = buf; - lba = off / DEV_BSIZE; - lba += zdsk->dsk.start; - /* - * Align reads to 4k else 4k sector GELIs will not decrypt. - * Round LBA down to nearest multiple of DEV_GELIBOOT_BSIZE bytes. - */ - alignlba = rounddown2(off, DEV_GELIBOOT_BSIZE) / DEV_BSIZE; - /* - * The read must be aligned to DEV_GELIBOOT_BSIZE bytes relative to the - * start of the GELI partition, not the start of the actual disk. - */ - alignlba += zdsk->dsk.start; - diff = (lba - alignlba) * DEV_BSIZE; - - while (bytes > 0) { - nb = bytes / DEV_BSIZE; - /* - * Ensure that the read size plus the leading offset does not - * exceed the size of the read buffer. - */ - if (nb > (READ_BUF_SIZE - diff) / DEV_BSIZE) - nb = (READ_BUF_SIZE - diff) / DEV_BSIZE; - /* - * Round the number of blocks to read up to the nearest multiple - * of DEV_GELIBOOT_BSIZE. - */ - alignnb = roundup2(nb * DEV_BSIZE + diff, DEV_GELIBOOT_BSIZE) - / DEV_BSIZE; - - if (zdsk->dsk.size > 0 && alignlba + alignnb > - zdsk->dsk.size + zdsk->dsk.start) { - printf("Shortening read at %lld from %d to %lld\n", - alignlba, alignnb, - (zdsk->dsk.size + zdsk->dsk.start) - alignlba); - alignnb = (zdsk->dsk.size + zdsk->dsk.start) - alignlba; - } - - if (drvread(&zdsk->dsk, dmadat->rdbuf, alignlba, alignnb)) - return (-1); -#ifdef LOADER_GELI_SUPPORT - /* decrypt */ - if (zdsk->gdev != NULL) { - if (geli_read(zdsk->gdev, - ((alignlba - zdsk->dsk.start) * DEV_BSIZE), - dmadat->rdbuf, alignnb * DEV_BSIZE)) - return (-1); - } +struct fs_ops *file_system[] = { +#if defined(LOADER_ZFS_SUPPORT) + &zfs_fsops, #endif - memcpy(p, dmadat->rdbuf + diff, nb * DEV_BSIZE); - p += nb * DEV_BSIZE; - lba += nb; - alignlba += alignnb; - bytes -= nb * DEV_BSIZE; - /* Don't need the leading offset after the first block. 
*/ - diff = 0; - } - - return (0); -} -/* Match the signature exactly due to signature madness */ -static int -vdev_read2(vdev_t *vdev, void *priv, off_t off, void *buf, size_t bytes) -{ - return (vdev_read(vdev, priv, off, buf, bytes)); -} - - -static int -vdev_write(vdev_t *vdev, void *priv, off_t off, void *buf, size_t bytes) -{ - char *p; - daddr_t lba; - unsigned int nb; - struct zfsdsk *zdsk = priv; - - if ((off & (DEV_BSIZE - 1)) || (bytes & (DEV_BSIZE - 1))) - return (-1); - - p = buf; - lba = off / DEV_BSIZE; - lba += zdsk->dsk.start; - while (bytes > 0) { - nb = bytes / DEV_BSIZE; - if (nb > READ_BUF_SIZE / DEV_BSIZE) - nb = READ_BUF_SIZE / DEV_BSIZE; - memcpy(dmadat->rdbuf, p, nb * DEV_BSIZE); - if (drvwrite(&zdsk->dsk, dmadat->rdbuf, lba, nb)) - return (-1); - p += nb * DEV_BSIZE; - lba += nb; - bytes -= nb * DEV_BSIZE; - } - - return (0); -} - -static int -xfsread(const dnode_phys_t *dnode, off_t *offp, void *buf, size_t nbyte) -{ - if ((size_t)zfs_read(spa, dnode, offp, buf, nbyte) != nbyte) { - printf("Invalid format\n"); - return (-1); - } - return (0); -} - -/* - * Read Pad2 (formerly "Boot Block Header") area of the first - * vdev label of the given vdev. - */ -static int -vdev_read_pad2(vdev_t *vdev, char *buf, size_t size) -{ - blkptr_t bp; - char *tmp; - off_t off = offsetof(vdev_label_t, vl_pad2); - int rc; - - if (size > VDEV_PAD_SIZE) - size = VDEV_PAD_SIZE; - - tmp = malloc(VDEV_PAD_SIZE); - if (tmp == NULL) - return (ENOMEM); - - BP_ZERO(&bp); - BP_SET_LSIZE(&bp, VDEV_PAD_SIZE); - BP_SET_PSIZE(&bp, VDEV_PAD_SIZE); - BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL); - BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); - DVA_SET_OFFSET(BP_IDENTITY(&bp), off); - rc = vdev_read_phys(vdev, &bp, tmp, off, 0); - if (rc == 0) - memcpy(buf, tmp, size); - free(tmp); - return (rc); -} - -static int -vdev_clear_pad2(vdev_t *vdev) -{ - char *zeroes; - uint64_t *end; - off_t off = offsetof(vdev_label_t, vl_pad2); - int rc; - - zeroes = malloc(VDEV_PAD_SIZE); - if (zeroes == NULL) - return (ENOMEM); - - memset(zeroes, 0, VDEV_PAD_SIZE); - end = (uint64_t *)(zeroes + VDEV_PAD_SIZE); - /* ZIO_CHECKSUM_LABEL magic and pre-calcualted checksum for all zeros */ - end[-5] = 0x0210da7ab10c7a11; - end[-4] = 0x97f48f807f6e2a3f; - end[-3] = 0xaf909f1658aacefc; - end[-2] = 0xcbd1ea57ff6db48b; - end[-1] = 0x6ec692db0d465fab; - rc = vdev_write(vdev, vdev->v_read_priv, off, zeroes, VDEV_PAD_SIZE); - free(zeroes); - return (rc); -} - -static void -bios_getmem(void) -{ - uint64_t size; - - /* Parse system memory map */ - v86.ebx = 0; - do { - v86.ctl = V86_FLAGS; - v86.addr = 0x15; /* int 0x15 function 0xe820 */ - v86.eax = 0xe820; - v86.ecx = sizeof(struct bios_smap); - v86.edx = SMAP_SIG; - v86.es = VTOPSEG(&smap); - v86.edi = VTOPOFF(&smap); - v86int(); - if (V86_CY(v86.efl) || (v86.eax != SMAP_SIG)) - break; - /* look for a low-memory segment that's large enough */ - if ((smap.type == SMAP_TYPE_MEMORY) && (smap.base == 0) && - (smap.length >= (512 * 1024))) - bios_basemem = smap.length; - /* look for the first segment in 'extended' memory */ - if ((smap.type == SMAP_TYPE_MEMORY) && - (smap.base == 0x100000)) { - bios_extmem = smap.length; - } - - /* - * Look for the largest segment in 'extended' memory beyond - * 1MB but below 4GB. - */ - if ((smap.type == SMAP_TYPE_MEMORY) && (smap.base > 0x100000) && - (smap.base < 0x100000000ull)) { - size = smap.length; - - /* - * If this segment crosses the 4GB boundary, - * truncate it. 
- */ - if (smap.base + size > 0x100000000ull) - size = 0x100000000ull - smap.base; - - if (size > high_heap_size) { - high_heap_size = size; - high_heap_base = smap.base; - } - } - } while (v86.ebx != 0); - - /* Fall back to the old compatibility function for base memory */ - if (bios_basemem == 0) { - v86.ctl = 0; - v86.addr = 0x12; /* int 0x12 */ - v86int(); - - bios_basemem = (v86.eax & 0xffff) * 1024; - } - - /* - * Fall back through several compatibility functions for extended - * memory. - */ - if (bios_extmem == 0) { - v86.ctl = V86_FLAGS; - v86.addr = 0x15; /* int 0x15 function 0xe801 */ - v86.eax = 0xe801; - v86int(); - if (!V86_CY(v86.efl)) { - bios_extmem = ((v86.ecx & 0xffff) + - ((v86.edx & 0xffff) * 64)) * 1024; - } - } - if (bios_extmem == 0) { - v86.ctl = 0; - v86.addr = 0x15; /* int 0x15 function 0x88 */ - v86.eax = 0x8800; - v86int(); - bios_extmem = (v86.eax & 0xffff) * 1024; - } - - /* - * If we have extended memory and did not find a suitable heap - * region in the SMAP, use the last 3MB of 'extended' memory as a - * high heap candidate. - */ - if (bios_extmem >= HEAP_MIN && high_heap_size < HEAP_MIN) { - high_heap_size = HEAP_MIN; - high_heap_base = bios_extmem + 0x100000 - HEAP_MIN; - } -} - -/* - * Try to detect a device supported by the legacy int13 BIOS - */ -static int -int13probe(int drive) -{ - v86.ctl = V86_FLAGS; - v86.addr = 0x13; - v86.eax = 0x800; - v86.edx = drive; - v86int(); - - if (!V86_CY(v86.efl) && /* carry clear */ - ((v86.edx & 0xff) != (drive & DRV_MASK))) { /* unit # OK */ - if ((v86.ecx & 0x3f) == 0) { /* absurd sector size */ - return (0); /* skip device */ - } - return (1); - } - return (0); -} - -/* - * We call this when we find a ZFS vdev - ZFS consumes the dsk - * structure so we must make a new one. - */ -static struct zfsdsk * -copy_dsk(struct zfsdsk *zdsk) -{ - struct zfsdsk *newdsk; - - newdsk = malloc(sizeof(struct zfsdsk)); - *newdsk = *zdsk; - return (newdsk); -} - -/* - * Get disk size from GPT. - */ -static uint64_t -drvsize_gpt(struct dsk *dskp) -{ -#ifdef GPT - struct gpt_hdr hdr; - char *sec; - - sec = dmadat->secbuf; - if (drvread(dskp, sec, 1, 1)) - return (0); - - memcpy(&hdr, sec, sizeof(hdr)); - if (memcmp(hdr.hdr_sig, GPT_HDR_SIG, sizeof(hdr.hdr_sig)) != 0 || - hdr.hdr_lba_self != 1 || hdr.hdr_revision < 0x00010000 || - hdr.hdr_entsz < sizeof(struct gpt_ent) || - DEV_BSIZE % hdr.hdr_entsz != 0) { - return (0); - } - return (hdr.hdr_lba_alt + 1); -#else - return (0); +#if defined(LOADER_UFS_SUPPORT) + &ufs_fsops, #endif -} + NULL +}; -/* - * Get disk size from eax=0x800 and 0x4800. We need to probe both - * because 0x4800 may not be available and we would like to get more - * or less correct disk size - if it is possible at all. - * Note we do not really want to touch drv.c because that code is shared - * with boot2 and we can not afford to grow that code. 
- */ -static uint64_t -drvsize_ext(struct zfsdsk *zdsk) +caddr_t +ptov(uintptr_t x) { - struct dsk *dskp; - uint64_t size, tmp; - int cyl, hds, sec; - - dskp = &zdsk->dsk; - - /* Try to read disk size from GPT */ - size = drvsize_gpt(dskp); - if (size != 0) - return (size); - - v86.ctl = V86_FLAGS; - v86.addr = 0x13; - v86.eax = 0x800; - v86.edx = dskp->drive; - v86int(); - - /* Don't error out if we get bad sector number, try EDD as well */ - if (V86_CY(v86.efl) || /* carry set */ - (v86.edx & 0xff) <= (unsigned)(dskp->drive & 0x7f)) /* unit # bad */ - return (0); - cyl = ((v86.ecx & 0xc0) << 2) + ((v86.ecx & 0xff00) >> 8) + 1; - /* Convert max head # -> # of heads */ - hds = ((v86.edx & 0xff00) >> 8) + 1; - sec = v86.ecx & 0x3f; - - size = (uint64_t)cyl * hds * sec; - - /* Determine if we can use EDD with this device. */ - v86.ctl = V86_FLAGS; - v86.addr = 0x13; - v86.eax = 0x4100; - v86.edx = dskp->drive; - v86.ebx = 0x55aa; - v86int(); - if (V86_CY(v86.efl) || /* carry set */ - (v86.ebx & 0xffff) != 0xaa55 || /* signature */ - (v86.ecx & EDD_INTERFACE_FIXED_DISK) == 0) - return (size); - - tmp = drvsize(dskp); - if (tmp > size) - size = tmp; - - return (size); + return (PTOV(x)); } -/* - * The "layered" ioctl to read disk/partition size. Unfortunately - * the zfsboot case is hardest, because we do not have full software - * stack available, so we need to do some manual work here. - */ -uint64_t -ldi_get_size(void *priv) -{ - struct zfsdsk *zdsk = priv; - uint64_t size = zdsk->dsk.size; - - if (zdsk->dsk.start == 0) - size = drvsize_ext(zdsk); - - return (size * DEV_BSIZE); -} - -static void -probe_drive(struct zfsdsk *zdsk) -{ -#ifdef GPT - struct gpt_hdr hdr; - struct gpt_ent *ent; - unsigned part, entries_per_sec; - daddr_t slba; -#endif -#if defined(GPT) || defined(LOADER_GELI_SUPPORT) - daddr_t elba; -#endif - - struct dos_partition *dp; - char *sec; - unsigned i; - -#ifdef LOADER_GELI_SUPPORT - /* - * Taste the disk, if it is GELI encrypted, decrypt it then dig out the - * partition table and probe each slice/partition in turn for a vdev or - * GELI encrypted vdev. - */ - elba = drvsize_ext(zdsk); - if (elba > 0) { - elba--; - } - zdsk->gdev = geli_taste(vdev_read, zdsk, elba, "disk%u:0:"); - if ((zdsk->gdev != NULL) && (geli_havekey(zdsk->gdev) == 0)) - geli_passphrase(zdsk->gdev, gelipw); -#endif /* LOADER_GELI_SUPPORT */ - - sec = dmadat->secbuf; - zdsk->dsk.start = 0; - -#ifdef GPT - /* - * First check for GPT. - */ - if (drvread(&zdsk->dsk, sec, 1, 1)) { - return; - } - memcpy(&hdr, sec, sizeof(hdr)); - if (memcmp(hdr.hdr_sig, GPT_HDR_SIG, sizeof(hdr.hdr_sig)) != 0 || - hdr.hdr_lba_self != 1 || hdr.hdr_revision < 0x00010000 || - hdr.hdr_entsz < sizeof(*ent) || DEV_BSIZE % hdr.hdr_entsz != 0) { - goto trymbr; - } - - /* - * Probe all GPT partitions for the presence of ZFS pools. We - * return the spa_t for the first we find (if requested). This - * will have the effect of booting from the first pool on the - * disk. 
- * - * If no vdev is found, GELI decrypting the device and try again - */ - entries_per_sec = DEV_BSIZE / hdr.hdr_entsz; - slba = hdr.hdr_lba_table; - elba = slba + hdr.hdr_entries / entries_per_sec; - while (slba < elba) { - zdsk->dsk.start = 0; - if (drvread(&zdsk->dsk, sec, slba, 1)) - return; - for (part = 0; part < entries_per_sec; part++) { - ent = (struct gpt_ent *)(sec + part * hdr.hdr_entsz); - if (memcmp(&ent->ent_type, &freebsd_zfs_uuid, - sizeof(uuid_t)) == 0) { - zdsk->dsk.start = ent->ent_lba_start; - zdsk->dsk.size = - ent->ent_lba_end - ent->ent_lba_start + 1; - zdsk->dsk.slice = part + 1; - zdsk->dsk.part = 255; - if (vdev_probe(vdev_read2, zdsk, NULL) == 0) { - /* - * This slice had a vdev. We need a new - * dsk structure now since the vdev now - * owns this one. - */ - zdsk = copy_dsk(zdsk); - } -#ifdef LOADER_GELI_SUPPORT - else if ((zdsk->gdev = geli_taste(vdev_read, - zdsk, ent->ent_lba_end - ent->ent_lba_start, - "disk%up%u:", zdsk->dsk.unit, - zdsk->dsk.slice)) != NULL) { - if (geli_havekey(zdsk->gdev) == 0 || - geli_passphrase(zdsk->gdev, gelipw) - == 0) { - /* - * This slice has GELI, - * check it for ZFS. - */ - if (vdev_probe(vdev_read2, - zdsk, NULL) == 0) { - /* - * This slice had a - * vdev. We need a new - * dsk structure now - * since the vdev now - * owns this one. - */ - zdsk = copy_dsk(zdsk); - } - break; - } - } -#endif /* LOADER_GELI_SUPPORT */ - } - } - slba++; - } - return; -trymbr: -#endif /* GPT */ - - if (drvread(&zdsk->dsk, sec, DOSBBSECTOR, 1)) - return; - dp = (void *)(sec + DOSPARTOFF); - - for (i = 0; i < NDOSPART; i++) { - if (!dp[i].dp_typ) - continue; - zdsk->dsk.start = dp[i].dp_start; - zdsk->dsk.size = dp[i].dp_size; - zdsk->dsk.slice = i + 1; - if (vdev_probe(vdev_read2, zdsk, NULL) == 0) { - zdsk = copy_dsk(zdsk); - } -#ifdef LOADER_GELI_SUPPORT - else if ((zdsk->gdev = geli_taste(vdev_read, zdsk, - dp[i].dp_size - dp[i].dp_start, "disk%us%u:")) != NULL) { - if (geli_havekey(zdsk->gdev) == 0 || - geli_passphrase(zdsk->gdev, gelipw) == 0) { - /* - * This slice has GELI, check it for ZFS. - */ - if (vdev_probe(vdev_read2, zdsk, NULL) == 0) { - /* - * This slice had a vdev. We need a new - * dsk structure now since the vdev now - * owns this one. - */ - zdsk = copy_dsk(zdsk); - } - break; - } - } -#endif /* LOADER_GELI_SUPPORT */ - } -} - int main(void) { - dnode_phys_t dn; - off_t off; - struct zfsdsk *zdsk; - int autoboot, i; - int nextboot; - int rc; + unsigned i; + int auto_boot, fd, nextboot = 0; + struct disk_devdesc devdesc; - dmadat = (void *)(roundup2(__base + (int32_t)&_end, 0x10000) - __base); - bios_getmem(); if (high_heap_size > 0) { - heap_end = PTOV(high_heap_base + high_heap_size); - heap_next = PTOV(high_heap_base); + heap_top = PTOV(high_heap_base + high_heap_size); + heap_bottom = PTOV(high_heap_base); } else { - heap_next = (char *)dmadat + sizeof(*dmadat); - heap_end = (char *)PTOV(bios_basemem); + heap_bottom = (char *) + (roundup2(__base + (int32_t)&_end, 0x10000) - __base); + heap_top = (char *)PTOV(bios_basemem); } - setheap(heap_next, heap_end); + setheap(heap_bottom, heap_top); - zdsk = calloc(1, sizeof(struct zfsdsk)); - zdsk->dsk.drive = *(uint8_t *)PTOV(ARGS); - zdsk->dsk.type = zdsk->dsk.drive & DRV_HARD ? TYPE_AD : TYPE_FD; - zdsk->dsk.unit = zdsk->dsk.drive & DRV_MASK; - zdsk->dsk.slice = *(uint8_t *)PTOV(ARGS + 1) + 1; - zdsk->dsk.part = 0; - zdsk->dsk.start = 0; - zdsk->dsk.size = drvsize_ext(zdsk); + /* + * Initialise the block cache. Set the upper limit. 
+ */ + bcache_init(32768, 512); + archsw.arch_autoload = NULL; + archsw.arch_getdev = i386_getdev; + archsw.arch_copyin = NULL; + archsw.arch_copyout = NULL; + archsw.arch_readin = NULL; + archsw.arch_isainb = NULL; + archsw.arch_isaoutb = NULL; + archsw.arch_zfs_probe = i386_zfs_probe; + bootinfo.bi_version = BOOTINFO_VERSION; bootinfo.bi_size = sizeof(bootinfo); bootinfo.bi_basemem = bios_basemem / 1024; bootinfo.bi_extmem = bios_extmem / 1024; bootinfo.bi_memsizes_valid++; - bootinfo.bi_bios_dev = zdsk->dsk.drive; + bootinfo.bi_bios_dev = *(uint8_t *)PTOV(ARGS); - bootdev = MAKEBOOTDEV(dev_maj[zdsk->dsk.type], - zdsk->dsk.slice, zdsk->dsk.unit, zdsk->dsk.part); + /* Set up fall back device name. */ + snprintf(boot_devname, sizeof (boot_devname), "disk%d:", + bd_bios2unit(bootinfo.bi_bios_dev)); - /* Process configuration file */ + for (i = 0; devsw[i] != NULL; i++) + if (devsw[i]->dv_init != NULL) + (devsw[i]->dv_init)(); - autoboot = 1; + disk_parsedev(&devdesc, boot_devname + 4, NULL); - zfs_init(); + bootdev = MAKEBOOTDEV(dev_maj[DEVT_DISK], devdesc.d_slice + 1, + devdesc.dd.d_unit, + devdesc.d_partition >= 0 ? devdesc.d_partition : 0xff); /* - * Probe the boot drive first - we will try to boot from whatever - * pool we find on that drive. + * zfs_fmtdev() can be called only after dv_init */ - probe_drive(zdsk); - - /* - * Probe the rest of the drives that the bios knows about. This - * will find any other available pools and it may fill in missing - * vdevs for the boot pool. - */ -#ifndef VIRTUALBOX - for (i = 0; i < *(unsigned char *)PTOV(BIOS_NUMDRIVES); i++) -#else - for (i = 0; i < MAXBDDEV; i++) -#endif - { - if ((i | DRV_HARD) == *(uint8_t *)PTOV(ARGS)) - continue; - - if (!int13probe(i | DRV_HARD)) - break; - - zdsk = calloc(1, sizeof(struct zfsdsk)); - zdsk->dsk.drive = i | DRV_HARD; - zdsk->dsk.type = zdsk->dsk.drive & TYPE_AD; - zdsk->dsk.unit = i; - zdsk->dsk.slice = 0; - zdsk->dsk.part = 0; - zdsk->dsk.start = 0; - zdsk->dsk.size = drvsize_ext(zdsk); - probe_drive(zdsk); - } - - /* - * The first discovered pool, if any, is the pool. - */ - spa = spa_get_primary(); - if (!spa) { - printf("%s: No ZFS pools located, can't boot\n", BOOTPROG); - for (;;) - ; - } - - primary_spa = spa; - primary_vdev = spa_get_primary_vdev(spa); - - nextboot = 0; - rc = vdev_read_pad2(primary_vdev, cmd, sizeof(cmd)); - if (vdev_clear_pad2(primary_vdev)) - printf("failed to clear pad2 area of primary vdev\n"); - if (rc == 0) { - if (*cmd) { - /* - * We could find an old-style ZFS Boot Block header - * here. Simply ignore it. - */ - if (*(uint64_t *)cmd != 0x2f5b007b10c) { - /* - * Note that parse() is destructive to cmd[] - * and we also want to honor RBX_QUIET option - * that could be present in cmd[]. 
- */ - nextboot = 1; - memcpy(cmddup, cmd, sizeof(cmd)); - if (parse_cmd()) { - printf("failed to parse pad2 area of " - "primary vdev\n"); - reboot(); - } + if (bdev != NULL && bdev->dd.d_dev->dv_type == DEVT_ZFS) { + /* set up proper device name string for ZFS */ + strncpy(boot_devname, zfs_fmtdev(bdev), sizeof (boot_devname)); + if (zfs_nextboot(bdev, cmd, sizeof(cmd)) == 0) { + nextboot = 1; + memcpy(cmddup, cmd, sizeof(cmd)); + if (parse_cmd()) { if (!OPT_CHECK(RBX_QUIET)) - printf("zfs nextboot: %s\n", cmddup); + printf("failed to parse pad2 area\n"); + exit(0); } + if (!OPT_CHECK(RBX_QUIET)) + printf("zfs nextboot: %s\n", cmddup); /* Do not process this command twice */ *cmd = 0; } - } else - printf("failed to read pad2 area of primary vdev\n"); + } - /* Mount ZFS only if it's not already mounted via nextboot parsing. */ - if (zfsmount.spa == NULL && - (zfs_spa_init(spa) != 0 || zfs_mount(spa, 0, &zfsmount) != 0)) { - printf("%s: failed to mount default pool %s\n", - BOOTPROG, spa->spa_name); - autoboot = 0; - } else if (zfs_lookup(&zfsmount, PATH_CONFIG, &dn) == 0 || - zfs_lookup(&zfsmount, PATH_DOTCONFIG, &dn) == 0) { - off = 0; - zfs_read(spa, &dn, &off, cmd, sizeof(cmd)); + /* now make sure we have bdev on all cases */ + free(bdev); + i386_getdev((void **)&bdev, boot_devname, NULL); + + env_setenv("currdev", EV_VOLATILE, boot_devname, i386_setcurrdev, + env_nounset); + + /* Process configuration file */ + auto_boot = 1; + + fd = open(PATH_CONFIG, O_RDONLY); + if (fd == -1) + fd = open(PATH_DOTCONFIG, O_RDONLY); + + if (fd != -1) { + read(fd, cmd, sizeof (cmd)); + close(fd); } if (*cmd) { /* * Note that parse_cmd() is destructive to cmd[] and we also * want to honor RBX_QUIET option that could be present in * cmd[]. */ memcpy(cmddup, cmd, sizeof(cmd)); if (parse_cmd()) - autoboot = 0; + auto_boot = 0; if (!OPT_CHECK(RBX_QUIET)) printf("%s: %s\n", PATH_CONFIG, cmddup); /* Do not process this command twice */ *cmd = 0; } /* Do not risk waiting at the prompt forever. */ - if (nextboot && !autoboot) - reboot(); + if (nextboot && !auto_boot) + exit(0); - if (autoboot && !*kname) { + if (auto_boot && !*kname) { /* * Iterate through the list of loader and kernel paths, * trying to load. If interrupted by a keypress, or in case of * failure, drop the user to the boot2 prompt. */ for (i = 0; i < nitems(loadpath); i++) { memcpy(kname, loadpath[i].p, loadpath[i].len); if (keyhit(3)) break; load(); } } /* Present the user with the boot2 prompt. */ for (;;) { - if (!autoboot || !OPT_CHECK(RBX_QUIET)) { + if (!auto_boot || !OPT_CHECK(RBX_QUIET)) { printf("\nFreeBSD/x86 boot\n"); - if (zfs_rlookup(spa, zfsmount.rootobj, rootname) != 0) - printf("Default: %s/<0x%llx>:%s\n" - "boot: ", - spa->spa_name, zfsmount.rootobj, kname); - else if (rootname[0] != '\0') - printf("Default: %s/%s:%s\n" - "boot: ", - spa->spa_name, rootname, kname); - else - printf("Default: %s:%s\n" - "boot: ", - spa->spa_name, kname); + printf("Default: %s%s\nboot: ", boot_devname, kname); } if (ioctrl & IO_SERIAL) sio_flush(); - if (!autoboot || keyhit(5)) + if (!auto_boot || keyhit(5)) getstr(cmd, sizeof(cmd)); - else if (!autoboot || !OPT_CHECK(RBX_QUIET)) + else if (!auto_boot || !OPT_CHECK(RBX_QUIET)) putchar('\n'); - autoboot = 0; + auto_boot = 0; if (parse_cmd()) putchar('\a'); else load(); } } /* XXX - Needed for btxld to link the boot2 binary; do not remove. 
*/ void exit(int x) { __exit(x); } -void -reboot(void) -{ - __exit(0); -} - static void load(void) { union { struct exec ex; Elf32_Ehdr eh; } hdr; static Elf32_Phdr ep[2]; static Elf32_Shdr es[2]; caddr_t p; - dnode_phys_t dn; - off_t off; uint32_t addr, x; - int fmt, i, j; + int fd, fmt, i, j; + ssize_t size; - if (zfs_lookup(&zfsmount, kname, &dn)) { + if ((fd = open(kname, O_RDONLY)) == -1) { printf("\nCan't find %s\n", kname); return; } - off = 0; - if (xfsread(&dn, &off, &hdr, sizeof(hdr))) + + size = sizeof(hdr); + if (read(fd, &hdr, sizeof (hdr)) != size) { + close(fd); return; - if (N_GETMAGIC(hdr.ex) == ZMAGIC) + } + if (N_GETMAGIC(hdr.ex) == ZMAGIC) { fmt = 0; - else if (IS_ELF(hdr.eh)) + } else if (IS_ELF(hdr.eh)) { fmt = 1; - else { + } else { printf("Invalid %s\n", "format"); + close(fd); return; } if (fmt == 0) { addr = hdr.ex.a_entry & 0xffffff; p = PTOV(addr); - off = PAGE_SIZE; - if (xfsread(&dn, &off, p, hdr.ex.a_text)) + lseek(fd, PAGE_SIZE, SEEK_SET); + size = hdr.ex.a_text; + if (read(fd, p, hdr.ex.a_text) != size) { + close(fd); return; + } p += roundup2(hdr.ex.a_text, PAGE_SIZE); - if (xfsread(&dn, &off, p, hdr.ex.a_data)) + size = hdr.ex.a_data; + if (read(fd, p, hdr.ex.a_data) != size) { + close(fd); return; + } p += hdr.ex.a_data + roundup2(hdr.ex.a_bss, PAGE_SIZE); bootinfo.bi_symtab = VTOP(p); memcpy(p, &hdr.ex.a_syms, sizeof(hdr.ex.a_syms)); p += sizeof(hdr.ex.a_syms); if (hdr.ex.a_syms) { - if (xfsread(&dn, &off, p, hdr.ex.a_syms)) + size = hdr.ex.a_syms; + if (read(fd, p, hdr.ex.a_syms) != size) { + close(fd); return; + } p += hdr.ex.a_syms; - if (xfsread(&dn, &off, p, sizeof(int))) + size = sizeof (int); + if (read(fd, p, sizeof (int)) != size) { + close(fd); return; + } x = *(uint32_t *)p; p += sizeof(int); x -= sizeof(int); - if (xfsread(&dn, &off, p, x)) + size = x; + if (read(fd, p, x) != size) { + close(fd); return; + } p += x; } } else { - off = hdr.eh.e_phoff; + lseek(fd, hdr.eh.e_phoff, SEEK_SET); for (j = i = 0; i < hdr.eh.e_phnum && j < 2; i++) { - if (xfsread(&dn, &off, ep + j, sizeof(ep[0]))) + size = sizeof (ep[0]); + if (read(fd, ep + j, sizeof (ep[0])) != size) { + close(fd); return; + } if (ep[j].p_type == PT_LOAD) j++; } for (i = 0; i < 2; i++) { p = PTOV(ep[i].p_paddr & 0xffffff); - off = ep[i].p_offset; - if (xfsread(&dn, &off, p, ep[i].p_filesz)) + lseek(fd, ep[i].p_offset, SEEK_SET); + size = ep[i].p_filesz; + if (read(fd, p, ep[i].p_filesz) != size) { + close(fd); return; + } } p += roundup2(ep[1].p_memsz, PAGE_SIZE); bootinfo.bi_symtab = VTOP(p); if (hdr.eh.e_shnum == hdr.eh.e_shstrndx + 3) { - off = hdr.eh.e_shoff + sizeof(es[0]) * - (hdr.eh.e_shstrndx + 1); - if (xfsread(&dn, &off, &es, sizeof(es))) + lseek(fd, hdr.eh.e_shoff + + sizeof (es[0]) * (hdr.eh.e_shstrndx + 1), + SEEK_SET); + size = sizeof(es); + if (read(fd, &es, sizeof (es)) != size) { + close(fd); return; + } for (i = 0; i < 2; i++) { memcpy(p, &es[i].sh_size, sizeof(es[i].sh_size)); p += sizeof(es[i].sh_size); - off = es[i].sh_offset; - if (xfsread(&dn, &off, p, es[i].sh_size)) + lseek(fd, es[i].sh_offset, SEEK_SET); + size = es[i].sh_size; + if (read(fd, p, es[i].sh_size) != size) { + close(fd); return; + } p += es[i].sh_size; } } addr = hdr.eh.e_entry & 0xffffff; } + close(fd); + bootinfo.bi_esymtab = VTOP(p); bootinfo.bi_kernelname = VTOP(kname); - zfsargs.size = sizeof(zfsargs); - zfsargs.pool = zfsmount.spa->spa_guid; - zfsargs.root = zfsmount.rootobj; - zfsargs.primary_pool = primary_spa->spa_guid; #ifdef LOADER_GELI_SUPPORT explicit_bzero(gelipw, sizeof(gelipw)); 
- export_geli_boot_data(&zfsargs.gelidata); #endif - if (primary_vdev != NULL) - zfsargs.primary_vdev = primary_vdev->v_guid; - else - printf("failed to detect primary vdev\n"); - /* - * Note that the zfsargs struct is passed by value, not by pointer. - * Code in btxldr.S copies the values from the entry stack to a fixed - * location within loader(8) at startup due to the presence of - * KARGS_FLAGS_EXTARG. - */ - __exec((caddr_t)addr, RB_BOOTINFO | (opts & RBX_MASK), - bootdev, - KARGS_FLAGS_ZFS | KARGS_FLAGS_EXTARG, - (uint32_t)spa->spa_guid, - (uint32_t)(spa->spa_guid >> 32), - VTOP(&bootinfo), - zfsargs); + + if (bdev->dd.d_dev->dv_type == DEVT_ZFS) { + zfsargs.size = sizeof(zfsargs); + zfsargs.pool = bdev->d_kind.zfs.pool_guid; + zfsargs.root = bdev->d_kind.zfs.root_guid; +#ifdef LOADER_GELI_SUPPORT + export_geli_boot_data(&zfsargs.gelidata); +#endif + /* + * Note that the zfsargs struct is passed by value, not by + * pointer. Code in btxldr.S copies the values from the entry + * stack to a fixed location within loader(8) at startup due + * to the presence of KARGS_FLAGS_EXTARG. + */ + __exec((caddr_t)addr, RB_BOOTINFO | (opts & RBX_MASK), + bootdev, + KARGS_FLAGS_ZFS | KARGS_FLAGS_EXTARG, + (uint32_t)bdev->d_kind.zfs.pool_guid, + (uint32_t)(bdev->d_kind.zfs.pool_guid >> 32), + VTOP(&bootinfo), + zfsargs); + } else { +#ifdef LOADER_GELI_SUPPORT + geliargs.size = sizeof(geliargs); + export_geli_boot_data(&geliargs.gelidata); +#endif + + /* + * Note that the geliargs struct is passed by value, not by + * pointer. Code in btxldr.S copies the values from the entry + * stack to a fixed location within loader(8) at startup due + * to the presence of the KARGS_FLAGS_EXTARG flag. + */ + __exec((caddr_t)addr, RB_BOOTINFO | (opts & RBX_MASK), + bootdev, +#ifdef LOADER_GELI_SUPPORT + KARGS_FLAGS_GELI | KARGS_FLAGS_EXTARG, 0, 0, + VTOP(&bootinfo), geliargs +#else + 0, 0, 0, VTOP(&bootinfo) +#endif + ); + } } static int -zfs_mount_ds(char *dsname) +mount_root(char *arg) { - uint64_t newroot; - spa_t *newspa; - char *q; + char *root; + struct i386_devdesc *ddesc; + uint8_t part; - q = strchr(dsname, '/'); - if (q) - *q++ = '\0'; - newspa = spa_find_by_name(dsname); - if (newspa == NULL) { - printf("\nCan't find ZFS pool %s\n", dsname); - return (-1); + if (asprintf(&root, "%s:", arg) < 0) + return (1); + + if (i386_getdev((void **)&ddesc, root, NULL)) { + free(root); + return (1); } - if (zfs_spa_init(newspa)) - return (-1); - - newroot = 0; - if (q) { - if (zfs_lookup_dataset(newspa, q, &newroot)) { - printf("\nCan't find dataset %s in ZFS pool %s\n", - q, newspa->spa_name); - return (-1); - } + /* we should have new device descriptor, free old and replace it. 
*/ + free(bdev); + bdev = ddesc; + if (bdev->dd.d_dev->dv_type == DEVT_DISK) { + if (bdev->d_kind.biosdisk.partition == -1) + part = 0xff; + else + part = bdev->d_kind.biosdisk.partition; + bootdev = MAKEBOOTDEV(dev_maj[bdev->dd.d_dev->dv_type], + bdev->d_kind.biosdisk.slice + 1, + bdev->dd.d_unit, part); + bootinfo.bi_bios_dev = bd_unit2bios(bdev); } - if (zfs_mount(newspa, newroot, &zfsmount)) { - printf("\nCan't mount ZFS dataset\n"); - return (-1); - } - spa = newspa; + strncpy(boot_devname, root, sizeof (boot_devname)); + setenv("currdev", root, 1); + free(root); return (0); } +static void +fs_list(char *arg) +{ + int fd; + struct dirent *d; + char line[80]; + + fd = open(arg, O_RDONLY); + if (fd < 0) + return; + pager_open(); + while ((d = readdirfd(fd)) != NULL) { + sprintf(line, "%s\n", d->d_name); + if (pager_output(line)) + break; + } + pager_close(); + close(fd); +} + static int parse_cmd(void) { char *arg = cmd; char *ep, *p, *q; const char *cp; + char line[80]; int c, i, j; while ((c = *arg++)) { if (c == ' ' || c == '\t' || c == '\n') continue; for (p = arg; *p && *p != '\n' && *p != ' ' && *p != '\t'; p++) ; ep = p; if (*p) *p++ = 0; if (c == '-') { while ((c = *arg++)) { if (c == 'P') { if (*(uint8_t *)PTOV(0x496) & 0x10) { cp = "yes"; } else { opts |= OPT_SET(RBX_DUAL); opts |= OPT_SET(RBX_SERIAL); cp = "no"; } printf("Keyboard: %s\n", cp); continue; } else if (c == 'S') { j = 0; while ((unsigned int) (i = *arg++ - '0') <= 9) j = j * 10 + i; if (j > 0 && i == -'0') { comspeed = j; break; } /* * Fall through to error below * ('S' not in optstr[]). */ } for (i = 0; c != optstr[i]; i++) if (i == NOPT - 1) return (-1); opts ^= OPT_SET(flags[i]); } ioctrl = OPT_CHECK(RBX_DUAL) ? (IO_SERIAL|IO_KEYBOARD) : OPT_CHECK(RBX_SERIAL) ? IO_SERIAL : IO_KEYBOARD; if (ioctrl & IO_SERIAL) { if (sio_init(115200 / comspeed) != 0) ioctrl &= ~IO_SERIAL; } } if (c == '?') { - dnode_phys_t dn; - - if (zfs_lookup(&zfsmount, arg, &dn) == 0) { - zap_list(spa, &dn); - } + printf("\n"); + if (*arg == '\0') + arg = (char *)"/"; + fs_list(arg); + zfs_list(arg); return (-1); } else { + char *ptr; + printf("\n"); arg--; /* * Report pool status if the comment is 'status'. Lets * hope no-one wants to load /status as a kernel. */ if (strcmp(arg, "status") == 0) { - spa_all_status(); + pager_open(); + for (i = 0; devsw[i] != NULL; i++) { + if (devsw[i]->dv_print != NULL) { + if (devsw[i]->dv_print(1)) + break; + } else { + snprintf(line, sizeof(line), + "%s: (unknown)\n", + devsw[i]->dv_name); + if (pager_output(line)) + break; + } + } + pager_close(); return (-1); } /* * If there is "zfs:" prefix simply ignore it. */ - if (strncmp(arg, "zfs:", 4) == 0) - arg += 4; + ptr = arg; + if (strncmp(ptr, "zfs:", 4) == 0) + ptr += 4; /* * If there is a colon, switch pools. */ - q = strchr(arg, ':'); + q = strchr(ptr, ':'); if (q) { *q++ = '\0'; - if (zfs_mount_ds(arg) != 0) + if (mount_root(arg) != 0) { return (-1); + } arg = q; } if ((i = ep - arg)) { if ((size_t)i >= sizeof(kname)) return (-1); memcpy(kname, arg, i + 1); } } arg = p; } return (0); +} + +/* + * Probe all disks to discover ZFS pools. The idea is to walk all possible + * disk devices, however, we also need to identify possible boot pool. + * For boot pool detection we have boot disk passed us from BIOS, recorded + * in bootinfo.bi_bios_dev. + */ +static void +i386_zfs_probe(void) +{ + char devname[32]; + int boot_unit; + struct i386_devdesc dev; + uint64_t pool_guid = 0; + + dev.dd.d_dev = &bioshd; + /* Translate bios dev to our unit number. 
*/ + boot_unit = bd_bios2unit(bootinfo.bi_bios_dev); + + /* + * Open all the disks we can find and see if we can reconstruct + * ZFS pools from them. + */ + for (dev.dd.d_unit = 0; bd_unit2bios(&dev) >= 0; dev.dd.d_unit++) { + snprintf(devname, sizeof (devname), "%s%d:", bioshd.dv_name, + dev.dd.d_unit); + /* If this is not boot disk, use generic probe. */ + if (dev.dd.d_unit != boot_unit) + zfs_probe_dev(devname, NULL); + else + zfs_probe_dev(devname, &pool_guid); + + if (pool_guid != 0 && bdev == NULL) { + bdev = malloc(sizeof (struct i386_devdesc)); + bzero(bdev, sizeof (struct i386_devdesc)); + bdev->dd.d_dev = &zfs_dev; + bdev->d_kind.zfs.pool_guid = pool_guid; + } + } } Index: head/stand/libsa/zfs/Makefile.inc =================================================================== --- head/stand/libsa/zfs/Makefile.inc (revision 362430) +++ head/stand/libsa/zfs/Makefile.inc (revision 362431) @@ -1,17 +1,17 @@ # $FreeBSD$ .PATH: ${ZFSSRC} -SRCS+= zfs.c skein.c skein_block.c list.c +SRCS+= zfs.c nvlist.c skein.c skein_block.c list.c # Do not unroll skein loops, reduce code size CFLAGS+= -DSKEIN_LOOP=111 .PATH: ${SYSDIR}/crypto/skein .PATH: ${SYSDIR}/cddl/contrib/opensolaris/uts/common/os CFLAGS+= -I${LDRSRC} CFLAGS+= -I${SYSDIR}/cddl/boot/zfs CFLAGS+= -I${SYSDIR}/cddl/contrib/opensolaris/uts/common CFLAGS+= -I${SYSDIR}/crypto/skein CFLAGS.zfs.c+= -I${SRCTOP}/sys/cddl/contrib/opensolaris/common/lz4 CFLAGS+= -Wformat -Wall Index: head/stand/libsa/zfs/libzfs.h =================================================================== --- head/stand/libsa/zfs/libzfs.h (revision 362430) +++ head/stand/libsa/zfs/libzfs.h (revision 362431) @@ -1,61 +1,134 @@ /*- * Copyright (c) 2012 Andriy Gapon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ +#include + +#ifdef LOADER_GELI_SUPPORT +#include +#endif + #ifndef _BOOT_LIBZFS_H_ #define _BOOT_LIBZFS_H_ #define ZFS_MAXNAMELEN 256 /* * ZFS fully-qualified device descriptor. */ struct zfs_devdesc { struct devdesc dd; /* Must be first. 
*/ uint64_t pool_guid; uint64_t root_guid; }; -#ifdef LOADER_GELI_SUPPORT -#include -#endif +/* nvp implementation version */ +#define NV_VERSION 0 +/* nvlist persistent unique name flags, stored in nvl_nvflags */ +#define NV_UNIQUE_NAME 0x1 +#define NV_UNIQUE_NAME_TYPE 0x2 + +#define NV_ALIGN4(x) (((x) + 3) & ~3) + +/* + * nvlist header. + * nvlist has 4 bytes header followed by version and flags, then nvpairs + * and the list is terminated by double zero. + */ +typedef struct { + char nvh_encoding; + char nvh_endian; + char nvh_reserved1; + char nvh_reserved2; +} nvs_header_t; + +typedef struct { + nvs_header_t nv_header; + size_t nv_asize; + size_t nv_size; + uint8_t *nv_data; + uint8_t *nv_idx; +} nvlist_t; + +/* + * nvpair header. + * nvpair has encoded and decoded size + * name string (size and data) + * data type and number of elements + * data + */ +typedef struct { + unsigned encoded_size; + unsigned decoded_size; +} nvp_header_t; + +/* + * nvlist stream head. + */ +typedef struct { + unsigned nvl_version; + unsigned nvl_nvflag; + nvp_header_t nvl_pair; +} nvs_data_t; + +typedef struct { + unsigned nv_size; + uint8_t nv_data[]; /* NV_ALIGN4(string) */ +} nv_string_t; + +typedef struct { + unsigned nv_type; /* data_type_t */ + unsigned nv_nelem; /* number of elements */ + uint8_t nv_data[]; /* data stream */ +} nv_pair_data_t; + +nvlist_t *nvlist_create(int); +void nvlist_destroy(nvlist_t *); +nvlist_t *nvlist_import(const uint8_t *, char, char); +int nvlist_remove(nvlist_t *, const char *, data_type_t); +void nvlist_print(nvlist_t *, unsigned int); +int nvlist_find(const nvlist_t *, const char *, data_type_t, + int *, void *, int *); +int nvlist_next(nvlist_t *); + int zfs_parsedev(struct zfs_devdesc *dev, const char *devspec, const char **path); char *zfs_fmtdev(void *vdev); +int zfs_nextboot(void *vdev, char *buf, size_t size); int zfs_probe_dev(const char *devname, uint64_t *pool_guid); int zfs_list(const char *name); uint64_t ldi_get_size(void *); void init_zfs_bootenv(const char *currdev); int zfs_bootenv(const char *name); int zfs_belist_add(const char *name, uint64_t __unused); int zfs_set_env(void); extern struct devsw zfs_dev; extern struct fs_ops zfs_fsops; #endif /*_BOOT_LIBZFS_H_*/ Index: head/stand/libsa/zfs/nvlist.c =================================================================== --- head/stand/libsa/zfs/nvlist.c (nonexistent) +++ head/stand/libsa/zfs/nvlist.c (revision 362431) @@ -0,0 +1,601 @@ +/*- + * Copyright 2020 Toomas Soome + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include "libzfs.h" + +typedef struct xdr { + int (*xdr_getint)(const struct xdr *, const void *, int *); +} xdr_t; + +static int xdr_int(const xdr_t *, const void *, int *); +static int mem_int(const xdr_t *, const void *, int *); +static void nvlist_decode_nvlist(const xdr_t *, nvlist_t *); +static int nvlist_size(const xdr_t *, const uint8_t *); + +/* + * transform data from network to host. + */ +xdr_t ntoh = { + .xdr_getint = xdr_int +}; + +/* + * transform data from host to host. + */ +xdr_t native = { + .xdr_getint = mem_int +}; + +/* + * transform data from host to network. + */ +xdr_t hton = { + .xdr_getint = xdr_int +}; + +static int +xdr_short(const xdr_t *xdr, const uint8_t *buf, short *ip) +{ + int i, rv; + + rv = xdr->xdr_getint(xdr, buf, &i); + *ip = i; + return (rv); +} + +static int +xdr_u_short(const xdr_t *xdr, const uint8_t *buf, unsigned short *ip) +{ + unsigned u; + int rv; + + rv = xdr->xdr_getint(xdr, buf, &u); + *ip = u; + return (rv); +} + +static int +xdr_int(const xdr_t *xdr __unused, const void *buf, int *ip) +{ + *ip = be32dec(buf); + return (sizeof(int)); +} + +static int +xdr_u_int(const xdr_t *xdr __unused, const void *buf, unsigned *ip) +{ + *ip = be32dec(buf); + return (sizeof(unsigned)); +} + +static int +xdr_string(const xdr_t *xdr, const void *buf, nv_string_t *s) +{ + int size; + + size = xdr->xdr_getint(xdr, buf, &s->nv_size); + size = NV_ALIGN4(size + s->nv_size); + return (size); +} + +static int +xdr_int64(const xdr_t *xdr, const uint8_t *buf, int64_t *lp) +{ + int hi, rv; + unsigned lo; + + rv = xdr->xdr_getint(xdr, buf, &hi); + rv += xdr->xdr_getint(xdr, buf + rv, &lo); + *lp = (((int64_t)hi) << 32) | lo; + return (rv); +} + +static int +xdr_uint64(const xdr_t *xdr, const uint8_t *buf, uint64_t *lp) +{ + unsigned hi, lo; + int rv; + + rv = xdr->xdr_getint(xdr, buf, &hi); + rv += xdr->xdr_getint(xdr, buf + rv, &lo); + *lp = (((int64_t)hi) << 32) | lo; + return (rv); +} + +static int +xdr_char(const xdr_t *xdr, const uint8_t *buf, char *cp) +{ + int i, rv; + + rv = xdr->xdr_getint(xdr, buf, &i); + *cp = i; + return (rv); +} + +/* + * read native data. + */ +static int +mem_int(const xdr_t *xdr, const void *buf, int *i) +{ + *i = *(int *)buf; + return (sizeof(int)); +} + +void +nvlist_destroy(nvlist_t *nvl) +{ + if (nvl != NULL) { + /* Free data if it was allocated by us. */ + if (nvl->nv_asize > 0) + free(nvl->nv_data); + } + free(nvl); +} + +char * +nvstring_get(nv_string_t *nvs) +{ + char *s; + + s = malloc(nvs->nv_size + 1); + if (s != NULL) { + bcopy(nvs->nv_data, s, nvs->nv_size); + s[nvs->nv_size] = '\0'; + } + return (s); +} + +/* + * Create empty nvlist. + * The nvlist is terminated by 2x zeros (8 bytes). 
+ */ +nvlist_t * +nvlist_create(int flag) +{ + nvlist_t *nvl; + nvs_data_t *nvs; + + nvl = calloc(1, sizeof(*nvl)); + if (nvl == NULL) + return (nvl); + + nvl->nv_header.nvh_encoding = NV_ENCODE_XDR; + nvl->nv_header.nvh_endian = _BYTE_ORDER == _LITTLE_ENDIAN; + + nvl->nv_asize = nvl->nv_size = sizeof(*nvs); + nvs = calloc(1, nvl->nv_asize); + if (nvs == NULL) { + free(nvl); + return (NULL); + } + /* data in nvlist is byte stream */ + nvl->nv_data = (uint8_t *)nvs; + + nvs->nvl_version = NV_VERSION; + nvs->nvl_nvflag = flag; + return (nvl); +} + +static void +nvlist_nvp_decode(const xdr_t *xdr, nvlist_t *nvl, nvp_header_t *nvph) +{ + nv_string_t *nv_string; + nv_pair_data_t *nvp_data; + nvlist_t nvlist; + + nv_string = (nv_string_t *)nvl->nv_idx; + nvl->nv_idx += xdr_string(xdr, &nv_string->nv_size, nv_string); + nvp_data = (nv_pair_data_t *)nvl->nv_idx; + + nvl->nv_idx += xdr_u_int(xdr, &nvp_data->nv_type, &nvp_data->nv_type); + nvl->nv_idx += xdr_u_int(xdr, &nvp_data->nv_nelem, &nvp_data->nv_nelem); + + switch (nvp_data->nv_type) { + case DATA_TYPE_NVLIST: + case DATA_TYPE_NVLIST_ARRAY: + bzero(&nvlist, sizeof (nvlist)); + nvlist.nv_data = &nvp_data->nv_data[0]; + nvlist.nv_idx = nvlist.nv_data; + for (int i = 0; i < nvp_data->nv_nelem; i++) { + nvlist.nv_asize = + nvlist_size(xdr, nvlist.nv_data); + nvlist_decode_nvlist(xdr, &nvlist); + nvl->nv_idx = nvlist.nv_idx; + nvlist.nv_data = nvlist.nv_idx; + } + break; + + case DATA_TYPE_BOOLEAN: + /* BOOLEAN does not take value space */ + break; + case DATA_TYPE_BYTE: + case DATA_TYPE_INT8: + case DATA_TYPE_UINT8: + nvl->nv_idx += xdr_char(xdr, &nvp_data->nv_data[0], + (char *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_INT16: + nvl->nv_idx += xdr_short(xdr, &nvp_data->nv_data[0], + (short *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_UINT16: + nvl->nv_idx += xdr_u_short(xdr, &nvp_data->nv_data[0], + (unsigned short *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_BOOLEAN_VALUE: + case DATA_TYPE_INT32: + nvl->nv_idx += xdr_int(xdr, &nvp_data->nv_data[0], + (int *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_UINT32: + nvl->nv_idx += xdr_u_int(xdr, &nvp_data->nv_data[0], + (unsigned *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_INT64: + nvl->nv_idx += xdr_int64(xdr, &nvp_data->nv_data[0], + (int64_t *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_UINT64: + nvl->nv_idx += xdr_uint64(xdr, &nvp_data->nv_data[0], + (uint64_t *)&nvp_data->nv_data[0]); + break; + + case DATA_TYPE_STRING: + nv_string = (nv_string_t *)&nvp_data->nv_data[0]; + nvl->nv_idx += xdr_string(xdr, &nvp_data->nv_data[0], + nv_string); + + break; + } +} + +static void +nvlist_decode_nvlist(const xdr_t *xdr, nvlist_t *nvl) +{ + nvp_header_t *nvph; + nvs_data_t *nvs = (nvs_data_t *)nvl->nv_data; + + nvl->nv_idx = nvl->nv_data; + nvl->nv_idx += xdr->xdr_getint(xdr, (const uint8_t *)&nvs->nvl_version, + &nvs->nvl_version); + nvl->nv_idx += xdr->xdr_getint(xdr, (const uint8_t *)&nvs->nvl_nvflag, + &nvs->nvl_nvflag); + + nvph = &nvs->nvl_pair; + nvl->nv_idx += xdr->xdr_getint(xdr, + (const uint8_t *)&nvph->encoded_size, &nvph->encoded_size); + nvl->nv_idx += xdr->xdr_getint(xdr, + (const uint8_t *)&nvph->decoded_size, &nvph->decoded_size); + + while (nvph->encoded_size && nvph->decoded_size) { + nvlist_nvp_decode(xdr, nvl, nvph); + + nvph = (nvp_header_t *)(nvl->nv_idx); + nvl->nv_idx += xdr->xdr_getint(xdr, &nvph->encoded_size, + &nvph->encoded_size); + nvl->nv_idx += xdr->xdr_getint(xdr, &nvph->decoded_size, + &nvph->decoded_size); + } +} + 
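A minimal usage sketch (not part of this change) of the nvlist API introduced above: import an XDR-encoded stream, look up a 64-bit value by name, and release the list. It assumes the 4-byte nvs_header_t (encoding/endian bytes) has already been split off, so the pointer handed to nvlist_import() starts at the version word; it also assumes the libsa build supplies data_type_t and the DATA_TYPE_* constants through the opensolaris include paths added in Makefile.inc, and the helper name and the "pool_guid" lookup are illustrative only.

#include <stand.h>
#include "libzfs.h"

/*
 * Fetch a uint64 property from a raw XDR nvlist stream, e.g. the
 * config data read out of a vdev label.  Hypothetical helper, for
 * illustration of the API only.
 */
static int
nvlist_get_uint64(const uint8_t *stream, const char *name, uint64_t *valp)
{
	nvlist_t *nvl;
	int rc;

	/* stream[0] is the encoding byte, stream[1] the endian byte. */
	nvl = nvlist_import(stream + 4, stream[0], stream[1]);
	if (nvl == NULL)
		return (EIO);

	/* Element count and size outputs are not needed for a scalar. */
	rc = nvlist_find(nvl, name, DATA_TYPE_UINT64, NULL, valp, NULL);
	if (rc != 0)
		nvlist_print(nvl, 0);	/* dump the pairs when lookup fails */

	nvlist_destroy(nvl);
	return (rc);
}

For example, nvlist_get_uint64(label_config, "pool_guid", &guid) would pull the pool GUID out of a label config stream. Note that nvlist_find() returns aliases rather than copies: a DATA_TYPE_STRING result points into the imported buffer, and a nested DATA_TYPE_NVLIST result is a small wrapper whose nv_data still aliases the parent, so only the wrappers and the top-level list are passed to nvlist_destroy().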
+static int +nvlist_size(const xdr_t *xdr, const uint8_t *stream) +{ + const uint8_t *p, *pair; + unsigned encoded_size, decoded_size; + + p = stream; + p += 2 * sizeof(unsigned); + + pair = p; + p += xdr->xdr_getint(xdr, p, &encoded_size); + p += xdr->xdr_getint(xdr, p, &decoded_size); + while (encoded_size && decoded_size) { + p = pair + encoded_size; + pair = p; + p += xdr->xdr_getint(xdr, p, &encoded_size); + p += xdr->xdr_getint(xdr, p, &decoded_size); + } + return (p - stream); +} + +/* + * Import nvlist from byte stream. + * Determine the stream size and allocate private copy. + * Then translate the data. + */ +nvlist_t * +nvlist_import(const uint8_t *stream, char encoding, char endian) +{ + nvlist_t *nvl; + + if (encoding != NV_ENCODE_XDR) + return (NULL); + + nvl = malloc(sizeof(*nvl)); + if (nvl == NULL) + return (nvl); + + nvl->nv_asize = nvl->nv_size = nvlist_size(&ntoh, stream); + nvl->nv_data = malloc(nvl->nv_asize); + if (nvl->nv_data == NULL) { + free(nvl); + return (NULL); + } + nvl->nv_idx = nvl->nv_data; + bcopy(stream, nvl->nv_data, nvl->nv_asize); + + nvlist_decode_nvlist(&ntoh, nvl); + nvl->nv_idx = nvl->nv_data; + return (nvl); +} + +/* + * remove pair from this nvlist. + */ +int +nvlist_remove(nvlist_t *nvl, const char *name, data_type_t type) +{ + uint8_t *head, *tail; + nvs_data_t *data; + nvp_header_t *nvp; + nv_string_t *nvp_name; + nv_pair_data_t *nvp_data; + size_t size; + + if (nvl == NULL || nvl->nv_data == NULL || name == NULL) + return (EINVAL); + + head = nvl->nv_data; + data = (nvs_data_t *)head; + nvp = &data->nvl_pair; /* first pair in nvlist */ + head = (uint8_t *)nvp; + + while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { + nvp_name = (nv_string_t *)(head + sizeof(*nvp)); + + nvp_data = (nv_pair_data_t *) + NV_ALIGN4((uintptr_t)&nvp_name->nv_data[0] + + nvp_name->nv_size); + + if (memcmp(nvp_name->nv_data, name, nvp_name->nv_size) == 0 && + nvp_data->nv_type == type) { + /* + * set tail to point to next nvpair and size + * is the length of the tail. + */ + tail = head + nvp->encoded_size; + size = nvl->nv_data + nvl->nv_size - tail; + + /* adjust the size of the nvlist. */ + nvl->nv_size -= nvp->encoded_size; + bcopy(tail, head, size); + return (0); + } + /* Not our pair, skip to next. 
*/ + head = head + nvp->encoded_size; + nvp = (nvp_header_t *)head; + } + return (ENOENT); +} + +int +nvlist_find(const nvlist_t *nvl, const char *name, data_type_t type, + int *elementsp, void *valuep, int *sizep) +{ + nvs_data_t *data; + nvp_header_t *nvp; + nv_string_t *nvp_name; + nv_pair_data_t *nvp_data; + nvlist_t *nvlist; + + if (nvl == NULL || nvl->nv_data == NULL || name == NULL) + return (EINVAL); + + data = (nvs_data_t *)nvl->nv_data; + nvp = &data->nvl_pair; /* first pair in nvlist */ + + while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { + nvp_name = (nv_string_t *)((uint8_t *)nvp + sizeof(*nvp)); + + nvp_data = (nv_pair_data_t *) + NV_ALIGN4((uintptr_t)&nvp_name->nv_data[0] + + nvp_name->nv_size); + + if (memcmp(nvp_name->nv_data, name, nvp_name->nv_size) == 0 && + nvp_data->nv_type == type) { + if (elementsp != NULL) + *elementsp = nvp_data->nv_nelem; + switch (nvp_data->nv_type) { + case DATA_TYPE_UINT64: + *(uint64_t *)valuep = + *(uint64_t *)nvp_data->nv_data; + return (0); + case DATA_TYPE_STRING: + nvp_name = (nv_string_t *)nvp_data->nv_data; + if (sizep != NULL) { + *sizep = nvp_name->nv_size; + } + *(const uint8_t **)valuep = + &nvp_name->nv_data[0]; + return (0); + case DATA_TYPE_NVLIST: + case DATA_TYPE_NVLIST_ARRAY: + nvlist = malloc(sizeof(*nvlist)); + if (nvlist != NULL) { + nvlist->nv_header = nvl->nv_header; + nvlist->nv_asize = 0; + nvlist->nv_size = 0; + nvlist->nv_idx = NULL; + nvlist->nv_data = &nvp_data->nv_data[0]; + *(nvlist_t **)valuep = nvlist; + return (0); + } + return (ENOMEM); + } + return (EIO); + } + /* Not our pair, skip to next. */ + nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size); + } + return (ENOENT); +} + +/* + * Return the next nvlist in an nvlist array. + */ +int +nvlist_next(nvlist_t *nvl) +{ + nvs_data_t *data; + nvp_header_t *nvp; + + if (nvl == NULL || nvl->nv_data == NULL || nvl->nv_asize != 0) + return (EINVAL); + + data = (nvs_data_t *)nvl->nv_data; + nvp = &data->nvl_pair; /* first pair in nvlist */ + + while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { + nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size); + } + nvl->nv_data = (uint8_t *)nvp + sizeof(*nvp); + return (0); +} + +void +nvlist_print(nvlist_t *nvl, unsigned int indent) +{ + static const char *typenames[] = { + "DATA_TYPE_UNKNOWN", + "DATA_TYPE_BOOLEAN", + "DATA_TYPE_BYTE", + "DATA_TYPE_INT16", + "DATA_TYPE_UINT16", + "DATA_TYPE_INT32", + "DATA_TYPE_UINT32", + "DATA_TYPE_INT64", + "DATA_TYPE_UINT64", + "DATA_TYPE_STRING", + "DATA_TYPE_BYTE_ARRAY", + "DATA_TYPE_INT16_ARRAY", + "DATA_TYPE_UINT16_ARRAY", + "DATA_TYPE_INT32_ARRAY", + "DATA_TYPE_UINT32_ARRAY", + "DATA_TYPE_INT64_ARRAY", + "DATA_TYPE_UINT64_ARRAY", + "DATA_TYPE_STRING_ARRAY", + "DATA_TYPE_HRTIME", + "DATA_TYPE_NVLIST", + "DATA_TYPE_NVLIST_ARRAY", + "DATA_TYPE_BOOLEAN_VALUE", + "DATA_TYPE_INT8", + "DATA_TYPE_UINT8", + "DATA_TYPE_BOOLEAN_ARRAY", + "DATA_TYPE_INT8_ARRAY", + "DATA_TYPE_UINT8_ARRAY" + }; + nvs_data_t *data; + nvp_header_t *nvp; + nv_string_t *nvp_name; + nv_pair_data_t *nvp_data; + nvlist_t nvlist; + int i, j; + + data = (nvs_data_t *)nvl->nv_data; + nvp = &data->nvl_pair; /* first pair in nvlist */ + while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { + nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp)); + nvp_data = (nv_pair_data_t *) + NV_ALIGN4((uintptr_t)&nvp_name->nv_data[0] + + nvp_name->nv_size); + + for (int i = 0; i < indent; i++) + printf(" "); + + printf("%s [%d] %.*s", typenames[nvp_data->nv_type], + nvp_data->nv_nelem, 
nvp_name->nv_size, nvp_name->nv_data); + + switch (nvp_data->nv_type) { + case DATA_TYPE_UINT64: { + uint64_t val; + + val = *(uint64_t *)nvp_data->nv_data; + printf(" = 0x%jx\n", (uintmax_t)val); + break; + } + + case DATA_TYPE_STRING: { + nvp_name = (nv_string_t *)&nvp_data->nv_data[0]; + printf(" = \"%.*s\"\n", nvp_name->nv_size, + nvp_name->nv_data ); + break; + } + + case DATA_TYPE_NVLIST: + printf("\n"); + nvlist.nv_data = &nvp_data->nv_data[0]; + nvlist_print(&nvlist, indent + 2); + break; + + case DATA_TYPE_NVLIST_ARRAY: + nvlist.nv_data = &nvp_data->nv_data[0]; + for (j = 0; j < nvp_data->nv_nelem; j++) { + data = (nvs_data_t *)nvlist.nv_data; + printf("[%d]\n", j); + nvlist_print(&nvlist, indent + 2); + if (j != nvp_data->nv_nelem - 1) { + for (i = 0; i < indent; i++) + printf(" "); + printf("%s %.*s", + typenames[nvp_data->nv_type], + nvp_name->nv_size, + nvp_name->nv_data); + } + nvlist.nv_data = (uint8_t *)data + + nvlist_size(&native, nvlist.nv_data); + } + break; + + default: + printf("\n"); + } + nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size); + } + printf("%*s\n", indent + 13, "End of nvlist"); +} Property changes on: head/stand/libsa/zfs/nvlist.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/stand/libsa/zfs/zfs.c =================================================================== --- head/stand/libsa/zfs/zfs.c (revision 362430) +++ head/stand/libsa/zfs/zfs.c (revision 362431) @@ -1,1096 +1,1356 @@ /*- * Copyright (c) 2007 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* * Stand-alone file reading package. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "libzfs.h" #include "zfsimpl.c" /* Define the range of indexes to be populated with ZFS Boot Environments */ #define ZFS_BE_FIRST 4 #define ZFS_BE_LAST 8 static int zfs_open(const char *path, struct open_file *f); static int zfs_close(struct open_file *f); static int zfs_read(struct open_file *f, void *buf, size_t size, size_t *resid); static off_t zfs_seek(struct open_file *f, off_t offset, int where); static int zfs_stat(struct open_file *f, struct stat *sb); static int zfs_readdir(struct open_file *f, struct dirent *d); static void zfs_bootenv_initial(const char *); struct devsw zfs_dev; struct fs_ops zfs_fsops = { "zfs", zfs_open, zfs_close, zfs_read, null_write, zfs_seek, zfs_stat, zfs_readdir }; /* * In-core open file. */ struct file { off_t f_seekp; /* seek pointer */ dnode_phys_t f_dnode; uint64_t f_zap_type; /* zap type for readdir */ uint64_t f_num_leafs; /* number of fzap leaf blocks */ zap_leaf_phys_t *f_zap_leaf; /* zap leaf buffer */ }; static int zfs_env_index; static int zfs_env_count; SLIST_HEAD(zfs_be_list, zfs_be_entry) zfs_be_head = SLIST_HEAD_INITIALIZER(zfs_be_head); struct zfs_be_list *zfs_be_headp; struct zfs_be_entry { char *name; SLIST_ENTRY(zfs_be_entry) entries; } *zfs_be, *zfs_be_tmp; /* * Open a file. */ static int zfs_open(const char *upath, struct open_file *f) { struct zfsmount *mount = (struct zfsmount *)f->f_devdata; struct file *fp; int rc; if (f->f_dev != &zfs_dev) return (EINVAL); /* allocate file system specific data structure */ fp = calloc(1, sizeof(struct file)); if (fp == NULL) return (ENOMEM); f->f_fsdata = fp; rc = zfs_lookup(mount, upath, &fp->f_dnode); fp->f_seekp = 0; if (rc) { f->f_fsdata = NULL; free(fp); } return (rc); } static int zfs_close(struct open_file *f) { struct file *fp = (struct file *)f->f_fsdata; dnode_cache_obj = NULL; f->f_fsdata = NULL; free(fp); return (0); } /* * Copy a portion of a file into kernel memory. * Cross block boundaries when necessary. 
*/ static int zfs_read(struct open_file *f, void *start, size_t size, size_t *resid /* out */) { const spa_t *spa = ((struct zfsmount *)f->f_devdata)->spa; struct file *fp = (struct file *)f->f_fsdata; struct stat sb; size_t n; int rc; rc = zfs_stat(f, &sb); if (rc) return (rc); n = size; if (fp->f_seekp + n > sb.st_size) n = sb.st_size - fp->f_seekp; rc = dnode_read(spa, &fp->f_dnode, fp->f_seekp, start, n); if (rc) return (rc); if (0) { int i; for (i = 0; i < n; i++) putchar(((char*) start)[i]); } fp->f_seekp += n; if (resid) *resid = size - n; return (0); } static off_t zfs_seek(struct open_file *f, off_t offset, int where) { struct file *fp = (struct file *)f->f_fsdata; switch (where) { case SEEK_SET: fp->f_seekp = offset; break; case SEEK_CUR: fp->f_seekp += offset; break; case SEEK_END: { struct stat sb; int error; error = zfs_stat(f, &sb); if (error != 0) { errno = error; return (-1); } fp->f_seekp = sb.st_size - offset; break; } default: errno = EINVAL; return (-1); } return (fp->f_seekp); } static int zfs_stat(struct open_file *f, struct stat *sb) { const spa_t *spa = ((struct zfsmount *)f->f_devdata)->spa; struct file *fp = (struct file *)f->f_fsdata; return (zfs_dnode_stat(spa, &fp->f_dnode, sb)); } static int zfs_readdir(struct open_file *f, struct dirent *d) { const spa_t *spa = ((struct zfsmount *)f->f_devdata)->spa; struct file *fp = (struct file *)f->f_fsdata; mzap_ent_phys_t mze; struct stat sb; size_t bsize = fp->f_dnode.dn_datablkszsec << SPA_MINBLOCKSHIFT; int rc; rc = zfs_stat(f, &sb); if (rc) return (rc); if (!S_ISDIR(sb.st_mode)) return (ENOTDIR); /* * If this is the first read, get the zap type. */ if (fp->f_seekp == 0) { rc = dnode_read(spa, &fp->f_dnode, 0, &fp->f_zap_type, sizeof(fp->f_zap_type)); if (rc) return (rc); if (fp->f_zap_type == ZBT_MICRO) { fp->f_seekp = offsetof(mzap_phys_t, mz_chunk); } else { rc = dnode_read(spa, &fp->f_dnode, offsetof(zap_phys_t, zap_num_leafs), &fp->f_num_leafs, sizeof(fp->f_num_leafs)); if (rc) return (rc); fp->f_seekp = bsize; fp->f_zap_leaf = malloc(bsize); if (fp->f_zap_leaf == NULL) return (ENOMEM); rc = dnode_read(spa, &fp->f_dnode, fp->f_seekp, fp->f_zap_leaf, bsize); if (rc) return (rc); } } if (fp->f_zap_type == ZBT_MICRO) { mzap_next: if (fp->f_seekp >= bsize) return (ENOENT); rc = dnode_read(spa, &fp->f_dnode, fp->f_seekp, &mze, sizeof(mze)); if (rc) return (rc); fp->f_seekp += sizeof(mze); if (!mze.mze_name[0]) goto mzap_next; d->d_fileno = ZFS_DIRENT_OBJ(mze.mze_value); d->d_type = ZFS_DIRENT_TYPE(mze.mze_value); strcpy(d->d_name, mze.mze_name); d->d_namlen = strlen(d->d_name); return (0); } else { zap_leaf_t zl; zap_leaf_chunk_t *zc, *nc; int chunk; size_t namelen; char *p; uint64_t value; /* * Initialise this so we can use the ZAP size * calculating macros. */ zl.l_bs = ilog2(bsize); zl.l_phys = fp->f_zap_leaf; /* * Figure out which chunk we are currently looking at * and consider seeking to the next leaf. We use the * low bits of f_seekp as a simple chunk index. */ fzap_next: chunk = fp->f_seekp & (bsize - 1); if (chunk == ZAP_LEAF_NUMCHUNKS(&zl)) { fp->f_seekp = rounddown2(fp->f_seekp, bsize) + bsize; chunk = 0; /* * Check for EOF and read the new leaf. 
*/ if (fp->f_seekp >= bsize * fp->f_num_leafs) return (ENOENT); rc = dnode_read(spa, &fp->f_dnode, fp->f_seekp, fp->f_zap_leaf, bsize); if (rc) return (rc); } zc = &ZAP_LEAF_CHUNK(&zl, chunk); fp->f_seekp++; if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) goto fzap_next; namelen = zc->l_entry.le_name_numints; if (namelen > sizeof(d->d_name)) namelen = sizeof(d->d_name); /* * Paste the name back together. */ nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk); p = d->d_name; while (namelen > 0) { int len; len = namelen; if (len > ZAP_LEAF_ARRAY_BYTES) len = ZAP_LEAF_ARRAY_BYTES; memcpy(p, nc->l_array.la_array, len); p += len; namelen -= len; nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next); } d->d_name[sizeof(d->d_name) - 1] = 0; /* * Assume the first eight bytes of the value are * a uint64_t. */ value = fzap_leaf_value(&zl, zc); d->d_fileno = ZFS_DIRENT_OBJ(value); d->d_type = ZFS_DIRENT_TYPE(value); d->d_namlen = strlen(d->d_name); return (0); } } static int vdev_read(vdev_t *vdev, void *priv, off_t offset, void *buf, size_t bytes) { int fd, ret; size_t res, head, tail, total_size, full_sec_size; unsigned secsz, do_tail_read; off_t start_sec; char *outbuf, *bouncebuf; fd = (uintptr_t) priv; outbuf = (char *) buf; bouncebuf = NULL; ret = ioctl(fd, DIOCGSECTORSIZE, &secsz); if (ret != 0) return (ret); /* * Handling reads of arbitrary offset and size - multi-sector case * and single-sector case. * * Multi-sector Case * (do_tail_read = true if tail > 0) * * |<----------------------total_size--------------------->| * | | * |<--head-->|<--------------bytes------------>|<--tail-->| * | | | | * | | |<~full_sec_size~>| | | * +------------------+ +------------------+ * | |0101010| . . . |0101011| | * +------------------+ +------------------+ * start_sec start_sec + n * * * Single-sector Case * (do_tail_read = false) * * |<------total_size = secsz----->| * | | * |<-head->|<---bytes--->|<-tail->| * +-------------------------------+ * | |0101010101010| | * +-------------------------------+ * start_sec */ start_sec = offset / secsz; head = offset % secsz; total_size = roundup2(head + bytes, secsz); tail = total_size - (head + bytes); do_tail_read = ((tail > 0) && (head + bytes > secsz)); full_sec_size = total_size; if (head > 0) full_sec_size -= secsz; if (do_tail_read) full_sec_size -= secsz; /* Return of partial sector data requires a bounce buffer. */ if ((head > 0) || do_tail_read || bytes < secsz) { bouncebuf = malloc(secsz); if (bouncebuf == NULL) { printf("vdev_read: out of memory\n"); return (ENOMEM); } } if (lseek(fd, start_sec * secsz, SEEK_SET) == -1) { ret = errno; goto error; } /* Partial data return from first sector */ if (head > 0) { res = read(fd, bouncebuf, secsz); if (res != secsz) { ret = EIO; goto error; } memcpy(outbuf, bouncebuf + head, min(secsz - head, bytes)); outbuf += min(secsz - head, bytes); } /* * Full data return from read sectors. * Note, there is still corner case where we read * from sector boundary, but less than sector size, e.g. reading 512B * from 4k sector. 
*/ if (full_sec_size > 0) { if (bytes < full_sec_size) { res = read(fd, bouncebuf, secsz); if (res != secsz) { ret = EIO; goto error; } memcpy(outbuf, bouncebuf, bytes); } else { res = read(fd, outbuf, full_sec_size); if (res != full_sec_size) { ret = EIO; goto error; } outbuf += full_sec_size; } } /* Partial data return from last sector */ if (do_tail_read) { res = read(fd, bouncebuf, secsz); if (res != secsz) { ret = EIO; goto error; } memcpy(outbuf, bouncebuf, secsz - tail); } ret = 0; error: free(bouncebuf); return (ret); } static int +vdev_write(vdev_t *vdev __unused, void *priv, off_t offset, void *buf, + size_t bytes) +{ + int fd, ret; + size_t head, tail, total_size, full_sec_size; + unsigned secsz, do_tail_write; + off_t start_sec; + ssize_t res; + char *outbuf, *bouncebuf; + + fd = (uintptr_t)priv; + outbuf = (char *) buf; + bouncebuf = NULL; + + ret = ioctl(fd, DIOCGSECTORSIZE, &secsz); + if (ret != 0) + return (ret); + + start_sec = offset / secsz; + head = offset % secsz; + total_size = roundup2(head + bytes, secsz); + tail = total_size - (head + bytes); + do_tail_write = ((tail > 0) && (head + bytes > secsz)); + full_sec_size = total_size; + if (head > 0) + full_sec_size -= secsz; + if (do_tail_write) + full_sec_size -= secsz; + + /* Partial sector write requires a bounce buffer. */ + if ((head > 0) || do_tail_write || bytes < secsz) { + bouncebuf = malloc(secsz); + if (bouncebuf == NULL) { + printf("vdev_write: out of memory\n"); + return (ENOMEM); + } + } + + if (lseek(fd, start_sec * secsz, SEEK_SET) == -1) { + ret = errno; + goto error; + } + + /* Partial data for first sector */ + if (head > 0) { + res = read(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + memcpy(bouncebuf + head, outbuf, min(secsz - head, bytes)); + (void) lseek(fd, -secsz, SEEK_CUR); + res = write(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + outbuf += min(secsz - head, bytes); + } + + /* + * Full data write to sectors. + * Note, there is still corner case where we write + * to sector boundary, but less than sector size, e.g. write 512B + * to 4k sector. 
+ */ + if (full_sec_size > 0) { + if (bytes < full_sec_size) { + res = read(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + memcpy(bouncebuf, outbuf, bytes); + (void) lseek(fd, -secsz, SEEK_CUR); + res = write(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + } else { + res = write(fd, outbuf, full_sec_size); + if (res != full_sec_size) { + ret = EIO; + goto error; + } + outbuf += full_sec_size; + } + } + + /* Partial data write to last sector */ + if (do_tail_write) { + res = read(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + memcpy(bouncebuf, outbuf, secsz - tail); + (void) lseek(fd, -secsz, SEEK_CUR); + res = write(fd, bouncebuf, secsz); + if (res != secsz) { + ret = EIO; + goto error; + } + } + + ret = 0; +error: + free(bouncebuf); + return (ret); +} + +static void +vdev_clear_pad2(vdev_t *vdev) +{ + vdev_t *kid; + vdev_boot_envblock_t *be; + off_t off = offsetof(vdev_label_t, vl_be); + zio_checksum_info_t *ci; + zio_cksum_t cksum; + + STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { + if (kid->v_state != VDEV_STATE_HEALTHY) + continue; + vdev_clear_pad2(kid); + } + + if (!STAILQ_EMPTY(&vdev->v_children)) + return; + + be = calloc(1, sizeof (*be)); + if (be == NULL) { + printf("failed to clear be area: out of memory\n"); + return; + } + + ci = &zio_checksum_table[ZIO_CHECKSUM_LABEL]; + be->vbe_zbt.zec_magic = ZEC_MAGIC; + zio_checksum_label_verifier(&be->vbe_zbt.zec_cksum, off); + ci->ci_func[0](be, sizeof (*be), NULL, &cksum); + be->vbe_zbt.zec_cksum = cksum; + + if (vdev_write(vdev, vdev->v_read_priv, off, be, VDEV_PAD_SIZE)) { + printf("failed to clear be area of primary vdev: %d\n", + errno); + } + free(be); +} + +/* + * Read the next boot command from pad2. + * If any instance of pad2 is set to empty string, or the returned string + * values are not the same, we consider next boot not to be set. + */ +static char * +vdev_read_pad2(vdev_t *vdev) +{ + vdev_t *kid; + char *tmp, *result = NULL; + vdev_boot_envblock_t *be; + off_t off = offsetof(vdev_label_t, vl_be); + + STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { + if (kid->v_state != VDEV_STATE_HEALTHY) + continue; + tmp = vdev_read_pad2(kid); + if (tmp == NULL) + continue; + + /* The next boot is not set, we are done. */ + if (*tmp == '\0') { + free(result); + return (tmp); + } + if (result == NULL) { + result = tmp; + continue; + } + /* Are the next boot strings different? */ + if (strcmp(result, tmp) != 0) { + free(tmp); + *result = '\0'; + break; + } + free(tmp); + } + if (result != NULL) + return (result); + + be = malloc(sizeof (*be)); + if (be == NULL) + return (NULL); + + if (vdev_read(vdev, vdev->v_read_priv, off, be, sizeof (*be))) { + return (NULL); + } + + switch (be->vbe_version) { + case VB_RAW: + case VB_NVLIST: + result = strdup(be->vbe_bootenv); + default: + /* Backward compatibility with initial nextboot feaure. 
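 * The initial implementation appears to have stored the command as a
 * bare string at the start of the pad2 area, without the
 * vdev_boot_envblock_t header, so a vbe_version that matches neither
 * VB_RAW nor VB_NVLIST is taken to mean that the whole block holds
 * such a legacy string.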
*/ + result = strdup((char *)be); + } + return (result); +} + +static int zfs_dev_init(void) { spa_t *spa; spa_t *next; spa_t *prev; zfs_init(); if (archsw.arch_zfs_probe == NULL) return (ENXIO); archsw.arch_zfs_probe(); prev = NULL; spa = STAILQ_FIRST(&zfs_pools); while (spa != NULL) { next = STAILQ_NEXT(spa, spa_link); if (zfs_spa_init(spa)) { if (prev == NULL) STAILQ_REMOVE_HEAD(&zfs_pools, spa_link); else STAILQ_REMOVE_AFTER(&zfs_pools, prev, spa_link); } else prev = spa; spa = next; } return (0); } struct zfs_probe_args { int fd; const char *devname; uint64_t *pool_guid; u_int secsz; }; static int zfs_diskread(void *arg, void *buf, size_t blocks, uint64_t offset) { struct zfs_probe_args *ppa; ppa = (struct zfs_probe_args *)arg; return (vdev_read(NULL, (void *)(uintptr_t)ppa->fd, offset * ppa->secsz, buf, blocks * ppa->secsz)); } static int zfs_probe(int fd, uint64_t *pool_guid) { spa_t *spa; int ret; spa = NULL; ret = vdev_probe(vdev_read, (void *)(uintptr_t)fd, &spa); if (ret == 0 && pool_guid != NULL) *pool_guid = spa->spa_guid; return (ret); } static int zfs_probe_partition(void *arg, const char *partname, const struct ptable_entry *part) { struct zfs_probe_args *ppa, pa; struct ptable *table; char devname[32]; int ret; /* Probe only freebsd-zfs and freebsd partitions */ if (part->type != PART_FREEBSD && part->type != PART_FREEBSD_ZFS) return (0); ppa = (struct zfs_probe_args *)arg; strncpy(devname, ppa->devname, strlen(ppa->devname) - 1); devname[strlen(ppa->devname) - 1] = '\0'; sprintf(devname, "%s%s:", devname, partname); - pa.fd = open(devname, O_RDONLY); + pa.fd = open(devname, O_RDWR); if (pa.fd == -1) return (0); ret = zfs_probe(pa.fd, ppa->pool_guid); if (ret == 0) return (0); /* Do we have BSD label here? */ if (part->type == PART_FREEBSD) { pa.devname = devname; pa.pool_guid = ppa->pool_guid; pa.secsz = ppa->secsz; table = ptable_open(&pa, part->end - part->start + 1, ppa->secsz, zfs_diskread); if (table != NULL) { ptable_iterate(table, &pa, zfs_probe_partition); ptable_close(table); } } close(pa.fd); return (0); } int +zfs_nextboot(void *vdev, char *buf, size_t size) +{ + struct zfs_devdesc *dev = (struct zfs_devdesc *)vdev; + spa_t *spa; + vdev_t *vd; + char *result = NULL; + + if (dev->dd.d_dev->dv_type != DEVT_ZFS) + return (1); + + if (dev->pool_guid == 0) + spa = STAILQ_FIRST(&zfs_pools); + else + spa = spa_find_by_guid(dev->pool_guid); + + if (spa == NULL) { + printf("ZFS: can't find pool by guid\n"); + return (1); + } + + STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, v_childlink) { + char *tmp = vdev_read_pad2(vd); + + /* Continue on error. */ + if (tmp == NULL) + continue; + /* Nextboot is not set. */ + if (*tmp == '\0') { + free(result); + free(tmp); + return (1); + } + if (result == NULL) { + result = tmp; + continue; + } + free(tmp); + } + if (result == NULL) + return (1); + + STAILQ_FOREACH(vd, &spa->spa_root_vdev->v_children, v_childlink) { + vdev_clear_pad2(vd); + } + + strlcpy(buf, result, size); + free(result); + return (0); +} + +int zfs_probe_dev(const char *devname, uint64_t *pool_guid) { struct disk_devdesc *dev; struct ptable *table; struct zfs_probe_args pa; uint64_t mediasz; int ret; if (pool_guid) *pool_guid = 0; - pa.fd = open(devname, O_RDONLY); + pa.fd = open(devname, O_RDWR); if (pa.fd == -1) return (ENXIO); /* * We will not probe the whole disk, we can not boot from such * disks and some systems will misreport the disk sizes and will * hang while accessing the disk. 
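 * The device itself is therefore probed only when the descriptor
 * resolves to a specific slice or partition; for a whole disk, only
 * the entries found in its partition table are probed below.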
*/ if (archsw.arch_getdev((void **)&dev, devname, NULL) == 0) { int partition = dev->d_partition; int slice = dev->d_slice; free(dev); if (partition != D_PARTNONE && slice != D_SLICENONE) { ret = zfs_probe(pa.fd, pool_guid); if (ret == 0) return (0); } } /* Probe each partition */ ret = ioctl(pa.fd, DIOCGMEDIASIZE, &mediasz); if (ret == 0) ret = ioctl(pa.fd, DIOCGSECTORSIZE, &pa.secsz); if (ret == 0) { pa.devname = devname; pa.pool_guid = pool_guid; table = ptable_open(&pa, mediasz / pa.secsz, pa.secsz, zfs_diskread); if (table != NULL) { ptable_iterate(table, &pa, zfs_probe_partition); ptable_close(table); } } close(pa.fd); if (pool_guid && *pool_guid == 0) ret = ENXIO; return (ret); } /* * Print information about ZFS pools */ static int zfs_dev_print(int verbose) { spa_t *spa; char line[80]; int ret = 0; if (STAILQ_EMPTY(&zfs_pools)) return (0); printf("%s devices:", zfs_dev.dv_name); if ((ret = pager_output("\n")) != 0) return (ret); if (verbose) { return (spa_all_status()); } STAILQ_FOREACH(spa, &zfs_pools, spa_link) { snprintf(line, sizeof(line), " zfs:%s\n", spa->spa_name); ret = pager_output(line); if (ret != 0) break; } return (ret); } /* * Attempt to open the pool described by (dev) for use by (f). */ static int zfs_dev_open(struct open_file *f, ...) { va_list args; struct zfs_devdesc *dev; struct zfsmount *mount; spa_t *spa; int rv; va_start(args, f); dev = va_arg(args, struct zfs_devdesc *); va_end(args); if (dev->pool_guid == 0) spa = STAILQ_FIRST(&zfs_pools); else spa = spa_find_by_guid(dev->pool_guid); if (!spa) return (ENXIO); mount = malloc(sizeof(*mount)); if (mount == NULL) rv = ENOMEM; else rv = zfs_mount(spa, dev->root_guid, mount); if (rv != 0) { free(mount); return (rv); } if (mount->objset.os_type != DMU_OST_ZFS) { printf("Unexpected object set type %ju\n", (uintmax_t)mount->objset.os_type); free(mount); return (EIO); } f->f_devdata = mount; free(dev); return (0); } static int zfs_dev_close(struct open_file *f) { free(f->f_devdata); f->f_devdata = NULL; return (0); } static int zfs_dev_strategy(void *devdata, int rw, daddr_t dblk, size_t size, char *buf, size_t *rsize) { return (ENOSYS); } struct devsw zfs_dev = { .dv_name = "zfs", .dv_type = DEVT_ZFS, .dv_init = zfs_dev_init, .dv_strategy = zfs_dev_strategy, .dv_open = zfs_dev_open, .dv_close = zfs_dev_close, .dv_ioctl = noioctl, .dv_print = zfs_dev_print, .dv_cleanup = NULL }; int zfs_parsedev(struct zfs_devdesc *dev, const char *devspec, const char **path) { static char rootname[ZFS_MAXNAMELEN]; static char poolname[ZFS_MAXNAMELEN]; spa_t *spa; const char *end; const char *np; const char *sep; int rv; np = devspec; if (*np != ':') return (EINVAL); np++; end = strrchr(np, ':'); if (end == NULL) return (EINVAL); sep = strchr(np, '/'); if (sep == NULL || sep >= end) sep = end; memcpy(poolname, np, sep - np); poolname[sep - np] = '\0'; if (sep < end) { sep++; memcpy(rootname, sep, end - sep); rootname[end - sep] = '\0'; } else rootname[0] = '\0'; spa = spa_find_by_name(poolname); if (!spa) return (ENXIO); dev->pool_guid = spa->spa_guid; rv = zfs_lookup_dataset(spa, rootname, &dev->root_guid); if (rv != 0) return (rv); if (path != NULL) *path = (*end == '\0') ? end : end + 1; dev->dd.d_dev = &zfs_dev; return (0); } char * zfs_fmtdev(void *vdev) { static char rootname[ZFS_MAXNAMELEN]; static char buf[2 * ZFS_MAXNAMELEN + 8]; struct zfs_devdesc *dev = (struct zfs_devdesc *)vdev; spa_t *spa; buf[0] = '\0'; if (dev->dd.d_dev->dv_type != DEVT_ZFS) return (buf); /* Do we have any pools? 
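 * If the descriptor carries no pool guid, the first pool on the
 * zfs_pools list is used as the default and its guid is filled in;
 * otherwise the pool is looked up by guid.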
*/ spa = STAILQ_FIRST(&zfs_pools); if (spa == NULL) return (buf); if (dev->pool_guid == 0) dev->pool_guid = spa->spa_guid; else spa = spa_find_by_guid(dev->pool_guid); if (spa == NULL) { printf("ZFS: can't find pool by guid\n"); return (buf); } if (dev->root_guid == 0 && zfs_get_root(spa, &dev->root_guid)) { printf("ZFS: can't find root filesystem\n"); return (buf); } if (zfs_rlookup(spa, dev->root_guid, rootname)) { printf("ZFS: can't find filesystem by guid\n"); return (buf); } if (rootname[0] == '\0') sprintf(buf, "%s:%s:", dev->dd.d_dev->dv_name, spa->spa_name); else sprintf(buf, "%s:%s/%s:", dev->dd.d_dev->dv_name, spa->spa_name, rootname); return (buf); } int zfs_list(const char *name) { static char poolname[ZFS_MAXNAMELEN]; uint64_t objid; spa_t *spa; const char *dsname; int len; int rv; len = strlen(name); dsname = strchr(name, '/'); if (dsname != NULL) { len = dsname - name; dsname++; } else dsname = ""; memcpy(poolname, name, len); poolname[len] = '\0'; spa = spa_find_by_name(poolname); if (!spa) return (ENXIO); rv = zfs_lookup_dataset(spa, dsname, &objid); if (rv != 0) return (rv); return (zfs_list_dataset(spa, objid)); } void init_zfs_bootenv(const char *currdev_in) { char *beroot, *currdev; int currdev_len; currdev = NULL; currdev_len = strlen(currdev_in); if (currdev_len == 0) return; if (strncmp(currdev_in, "zfs:", 4) != 0) return; currdev = strdup(currdev_in); if (currdev == NULL) return; /* Remove the trailing : */ currdev[currdev_len - 1] = '\0'; setenv("zfs_be_active", currdev, 1); setenv("zfs_be_currpage", "1", 1); /* Remove the last element (current bootenv) */ beroot = strrchr(currdev, '/'); if (beroot != NULL) beroot[0] = '\0'; beroot = strchr(currdev, ':') + 1; setenv("zfs_be_root", beroot, 1); zfs_bootenv_initial(beroot); free(currdev); } static void zfs_bootenv_initial(const char *name) { char poolname[ZFS_MAXNAMELEN], *dsname; char envname[32], envval[256]; uint64_t objid; spa_t *spa; int bootenvs_idx, len, rv; SLIST_INIT(&zfs_be_head); zfs_env_count = 0; len = strlen(name); dsname = strchr(name, '/'); if (dsname != NULL) { len = dsname - name; dsname++; } else dsname = ""; strlcpy(poolname, name, len + 1); spa = spa_find_by_name(poolname); if (spa == NULL) return; rv = zfs_lookup_dataset(spa, dsname, &objid); if (rv != 0) return; rv = zfs_callback_dataset(spa, objid, zfs_belist_add); bootenvs_idx = 0; /* Populate the initial environment variables */ SLIST_FOREACH_SAFE(zfs_be, &zfs_be_head, entries, zfs_be_tmp) { /* Enumerate all bootenvs for general usage */ snprintf(envname, sizeof(envname), "bootenvs[%d]", bootenvs_idx); snprintf(envval, sizeof(envval), "zfs:%s/%s", name, zfs_be->name); rv = setenv(envname, envval, 1); if (rv != 0) break; bootenvs_idx++; } snprintf(envval, sizeof(envval), "%d", bootenvs_idx); setenv("bootenvs_count", envval, 1); /* Clean up the SLIST of ZFS BEs */ while (!SLIST_EMPTY(&zfs_be_head)) { zfs_be = SLIST_FIRST(&zfs_be_head); SLIST_REMOVE_HEAD(&zfs_be_head, entries); free(zfs_be->name); free(zfs_be); } return; } int zfs_bootenv(const char *name) { static char poolname[ZFS_MAXNAMELEN], *dsname, *root; char becount[4]; uint64_t objid; spa_t *spa; int len, rv, pages, perpage, currpage; if (name == NULL) return (EINVAL); if ((root = getenv("zfs_be_root")) == NULL) return (EINVAL); if (strcmp(name, root) != 0) { if (setenv("zfs_be_root", name, 1) != 0) return (ENOMEM); } SLIST_INIT(&zfs_be_head); zfs_env_count = 0; len = strlen(name); dsname = strchr(name, '/'); if (dsname != NULL) { len = dsname - name; dsname++; } else dsname = ""; 
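	/*
	 * Split "name" into a pool name and an optional dataset path:
	 * "zroot/ROOT/default" yields poolname "zroot" and dsname
	 * "ROOT/default", while a bare "zroot" leaves dsname empty.
	 */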
memcpy(poolname, name, len); poolname[len] = '\0'; spa = spa_find_by_name(poolname); if (!spa) return (ENXIO); rv = zfs_lookup_dataset(spa, dsname, &objid); if (rv != 0) return (rv); rv = zfs_callback_dataset(spa, objid, zfs_belist_add); /* Calculate and store the number of pages of BEs */ perpage = (ZFS_BE_LAST - ZFS_BE_FIRST + 1); pages = (zfs_env_count / perpage) + ((zfs_env_count % perpage) > 0 ? 1 : 0); snprintf(becount, 4, "%d", pages); if (setenv("zfs_be_pages", becount, 1) != 0) return (ENOMEM); /* Roll over the page counter if it has exceeded the maximum */ currpage = strtol(getenv("zfs_be_currpage"), NULL, 10); if (currpage > pages) { if (setenv("zfs_be_currpage", "1", 1) != 0) return (ENOMEM); } /* Populate the menu environment variables */ zfs_set_env(); /* Clean up the SLIST of ZFS BEs */ while (!SLIST_EMPTY(&zfs_be_head)) { zfs_be = SLIST_FIRST(&zfs_be_head); SLIST_REMOVE_HEAD(&zfs_be_head, entries); free(zfs_be->name); free(zfs_be); } return (rv); } int zfs_belist_add(const char *name, uint64_t value __unused) { /* Skip special datasets that start with a $ character */ if (strncmp(name, "$", 1) == 0) { return (0); } /* Add the boot environment to the head of the SLIST */ zfs_be = malloc(sizeof(struct zfs_be_entry)); if (zfs_be == NULL) { return (ENOMEM); } zfs_be->name = strdup(name); if (zfs_be->name == NULL) { free(zfs_be); return (ENOMEM); } SLIST_INSERT_HEAD(&zfs_be_head, zfs_be, entries); zfs_env_count++; return (0); } int zfs_set_env(void) { char envname[32], envval[256]; char *beroot, *pagenum; int rv, page, ctr; beroot = getenv("zfs_be_root"); if (beroot == NULL) { return (1); } pagenum = getenv("zfs_be_currpage"); if (pagenum != NULL) { page = strtol(pagenum, NULL, 10); } else { page = 1; } ctr = 1; rv = 0; zfs_env_index = ZFS_BE_FIRST; SLIST_FOREACH_SAFE(zfs_be, &zfs_be_head, entries, zfs_be_tmp) { /* Skip to the requested page number */ if (ctr <= ((ZFS_BE_LAST - ZFS_BE_FIRST + 1) * (page - 1))) { ctr++; continue; } snprintf(envname, sizeof(envname), "bootenvmenu_caption[%d]", zfs_env_index); snprintf(envval, sizeof(envval), "%s", zfs_be->name); rv = setenv(envname, envval, 1); if (rv != 0) { break; } snprintf(envname, sizeof(envname), "bootenvansi_caption[%d]", zfs_env_index); rv = setenv(envname, envval, 1); if (rv != 0){ break; } snprintf(envname, sizeof(envname), "bootenvmenu_command[%d]", zfs_env_index); rv = setenv(envname, "set_bootenv", 1); if (rv != 0){ break; } snprintf(envname, sizeof(envname), "bootenv_root[%d]", zfs_env_index); snprintf(envval, sizeof(envval), "zfs:%s/%s", beroot, zfs_be->name); rv = setenv(envname, envval, 1); if (rv != 0){ break; } zfs_env_index++; if (zfs_env_index > ZFS_BE_LAST) { break; } } for (; zfs_env_index <= ZFS_BE_LAST; zfs_env_index++) { snprintf(envname, sizeof(envname), "bootenvmenu_caption[%d]", zfs_env_index); (void)unsetenv(envname); snprintf(envname, sizeof(envname), "bootenvansi_caption[%d]", zfs_env_index); (void)unsetenv(envname); snprintf(envname, sizeof(envname), "bootenvmenu_command[%d]", zfs_env_index); (void)unsetenv(envname); snprintf(envname, sizeof(envname), "bootenv_root[%d]", zfs_env_index); (void)unsetenv(envname); } return (rv); } Index: head/stand/libsa/zfs/zfsimpl.c =================================================================== --- head/stand/libsa/zfs/zfsimpl.c (revision 362430) +++ head/stand/libsa/zfs/zfsimpl.c (revision 362431) @@ -1,3674 +1,3448 @@ /*- * Copyright (c) 2007 Doug Rabson * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Stand-alone ZFS file reader. */ #include #include #include #include #include #include "zfsimpl.h" #include "zfssubr.c" struct zfsmount { const spa_t *spa; objset_phys_t objset; uint64_t rootobj; }; static struct zfsmount zfsmount __unused; /* * The indirect_child_t represents the vdev that we will read from, when we * need to read all copies of the data (e.g. for scrub or reconstruction). * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror), * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs, * ic_vdev is a child of the mirror. */ typedef struct indirect_child { void *ic_data; vdev_t *ic_vdev; } indirect_child_t; /* * The indirect_split_t represents one mapped segment of an i/o to the * indirect vdev. For non-split (contiguously-mapped) blocks, there will be * only one indirect_split_t, with is_split_offset==0 and is_size==io_size. * For split blocks, there will be several of these. */ typedef struct indirect_split { list_node_t is_node; /* link on iv_splits */ /* * is_split_offset is the offset into the i/o. * This is the sum of the previous splits' is_size's. */ uint64_t is_split_offset; vdev_t *is_vdev; /* top-level vdev */ uint64_t is_target_offset; /* offset on is_vdev */ uint64_t is_size; int is_children; /* number of entries in is_child[] */ /* * is_good_child is the child that we are currently using to * attempt reconstruction. */ int is_good_child; indirect_child_t is_child[1]; /* variable-length */ } indirect_split_t; /* * The indirect_vsd_t is associated with each i/o to the indirect vdev. * It is the "Vdev-Specific Data" in the zio_t's io_vsd. */ typedef struct indirect_vsd { boolean_t iv_split_block; boolean_t iv_reconstruct; list_t iv_splits; /* list of indirect_split_t's */ } indirect_vsd_t; /* * List of all vdevs, chained through v_alllink. 
*/ static vdev_list_t zfs_vdevs; /* * List of ZFS features supported for read */ static const char *features_for_read[] = { "org.illumos:lz4_compress", "com.delphix:hole_birth", "com.delphix:extensible_dataset", "com.delphix:embedded_data", "org.open-zfs:large_blocks", "org.illumos:sha512", "org.illumos:skein", "org.zfsonlinux:large_dnode", "com.joyent:multi_vdev_crash_dump", "com.delphix:spacemap_histogram", "com.delphix:zpool_checkpoint", "com.delphix:spacemap_v2", "com.datto:encryption", "org.zfsonlinux:allocation_classes", "com.datto:resilver_defer", "com.delphix:device_removal", "com.delphix:obsolete_counts", "com.intel:allocation_classes", NULL }; /* * List of all pools, chained through spa_link. */ static spa_list_t zfs_pools; static const dnode_phys_t *dnode_cache_obj; static uint64_t dnode_cache_bn; static char *dnode_cache_buf; static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf); static int zfs_get_root(const spa_t *spa, uint64_t *objid); static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result); static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t integer_size, uint64_t num_integers, void *value); static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t, dnode_phys_t *); static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *, size_t); static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t, size_t); static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t); vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *, uint64_t); vdev_indirect_mapping_entry_phys_t * vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t, uint64_t, uint64_t *); static void zfs_init(void) { STAILQ_INIT(&zfs_vdevs); STAILQ_INIT(&zfs_pools); dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE); zfs_init_crc(); } static int -xdr_int(const unsigned char **xdr, int *ip) +nvlist_check_features_for_read(nvlist_t *nvl) { - *ip = be32dec(*xdr); - (*xdr) += 4; - return (0); -} - -static int -xdr_u_int(const unsigned char **xdr, u_int *ip) -{ - *ip = be32dec(*xdr); - (*xdr) += 4; - return (0); -} - -static int -xdr_uint64_t(const unsigned char **xdr, uint64_t *lp) -{ - u_int hi, lo; - - xdr_u_int(xdr, &hi); - xdr_u_int(xdr, &lo); - *lp = (((uint64_t)hi) << 32) | lo; - return (0); -} - -static int -nvlist_find(const unsigned char *nvlist, const char *name, int type, - int *elementsp, void *valuep, int *sizep) -{ - const unsigned char *p, *pair; - int junk; - int encoded_size, decoded_size; - - p = nvlist; - xdr_int(&p, &junk); - xdr_int(&p, &junk); - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - while (encoded_size && decoded_size) { - int namelen, pairtype, elements; - const char *pairname; - - xdr_int(&p, &namelen); - pairname = (const char *)p; - p += roundup(namelen, 4); - xdr_int(&p, &pairtype); - - if (memcmp(name, pairname, namelen) == 0 && type == pairtype) { - xdr_int(&p, &elements); - if (elementsp) - *elementsp = elements; - if (type == DATA_TYPE_UINT64) { - xdr_uint64_t(&p, (uint64_t *)valuep); - return (0); - } else if (type == DATA_TYPE_STRING) { - int len; - xdr_int(&p, &len); - if (sizep != NULL) - *sizep = len; - (*(const char **)valuep) = (const char *)p; - return (0); - } else if (type == DATA_TYPE_NVLIST || - type == DATA_TYPE_NVLIST_ARRAY) { - (*(const unsigned char **)valuep) = - (const unsigned char *)p; - return (0); - } else { - return (EIO); - } - } else { - /* - * Not the pair we are looking 
for, skip to the - * next one. - */ - p = pair + encoded_size; - } - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - } - - return (EIO); -} - -static int -nvlist_check_features_for_read(const unsigned char *nvlist) -{ - const unsigned char *p, *pair; - int junk; - int encoded_size, decoded_size; + nvlist_t *features = NULL; + nvs_data_t *data; + nvp_header_t *nvp; + nv_string_t *nvp_name; int rc; - rc = 0; + rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ, + DATA_TYPE_NVLIST, NULL, &features, NULL); + if (rc != 0) + return (rc); - p = nvlist; - xdr_int(&p, &junk); - xdr_int(&p, &junk); + data = (nvs_data_t *)features->nv_data; + nvp = &data->nvl_pair; /* first pair in nvlist */ - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - while (encoded_size && decoded_size) { - int namelen, pairtype; - const char *pairname; + while (nvp->encoded_size != 0 && nvp->decoded_size != 0) { int i, found; + nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp)); found = 0; - xdr_int(&p, &namelen); - pairname = (const char *)p; - p += roundup(namelen, 4); - xdr_int(&p, &pairtype); - for (i = 0; features_for_read[i] != NULL; i++) { - if (memcmp(pairname, features_for_read[i], - namelen) == 0) { + if (memcmp(nvp_name->nv_data, features_for_read[i], + nvp_name->nv_size) == 0) { found = 1; break; } } if (!found) { - printf("ZFS: unsupported feature: %s\n", pairname); + printf("ZFS: unsupported feature: %.*s\n", + nvp_name->nv_size, nvp_name->nv_data); rc = EIO; } - - p = pair + encoded_size; - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); + nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size); } + nvlist_destroy(features); return (rc); } -/* - * Return the next nvlist in an nvlist array. 
- */ -static const unsigned char * -nvlist_next(const unsigned char *nvlist) -{ - const unsigned char *p, *pair; - int junk; - int encoded_size, decoded_size; - - p = nvlist; - xdr_int(&p, &junk); - xdr_int(&p, &junk); - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - while (encoded_size && decoded_size) { - p = pair + encoded_size; - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - } - - return (p); -} - -#ifdef TEST - -static const unsigned char * -nvlist_print(const unsigned char *nvlist, unsigned int indent) -{ - static const char *typenames[] = { - "DATA_TYPE_UNKNOWN", - "DATA_TYPE_BOOLEAN", - "DATA_TYPE_BYTE", - "DATA_TYPE_INT16", - "DATA_TYPE_UINT16", - "DATA_TYPE_INT32", - "DATA_TYPE_UINT32", - "DATA_TYPE_INT64", - "DATA_TYPE_UINT64", - "DATA_TYPE_STRING", - "DATA_TYPE_BYTE_ARRAY", - "DATA_TYPE_INT16_ARRAY", - "DATA_TYPE_UINT16_ARRAY", - "DATA_TYPE_INT32_ARRAY", - "DATA_TYPE_UINT32_ARRAY", - "DATA_TYPE_INT64_ARRAY", - "DATA_TYPE_UINT64_ARRAY", - "DATA_TYPE_STRING_ARRAY", - "DATA_TYPE_HRTIME", - "DATA_TYPE_NVLIST", - "DATA_TYPE_NVLIST_ARRAY", - "DATA_TYPE_BOOLEAN_VALUE", - "DATA_TYPE_INT8", - "DATA_TYPE_UINT8", - "DATA_TYPE_BOOLEAN_ARRAY", - "DATA_TYPE_INT8_ARRAY", - "DATA_TYPE_UINT8_ARRAY" - }; - - unsigned int i, j; - const unsigned char *p, *pair; - int junk; - int encoded_size, decoded_size; - - p = nvlist; - xdr_int(&p, &junk); - xdr_int(&p, &junk); - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - while (encoded_size && decoded_size) { - int namelen, pairtype, elements; - const char *pairname; - - xdr_int(&p, &namelen); - pairname = (const char *)p; - p += roundup(namelen, 4); - xdr_int(&p, &pairtype); - - for (i = 0; i < indent; i++) - printf(" "); - printf("%s %.*s", typenames[pairtype], namelen, pairname); - - xdr_int(&p, &elements); - switch (pairtype) { - case DATA_TYPE_UINT64: { - uint64_t val; - xdr_uint64_t(&p, &val); - printf(" = 0x%jx\n", (uintmax_t)val); - break; - } - - case DATA_TYPE_STRING: { - int len; - xdr_int(&p, &len); - printf(" = \"%.*s\"\n", len, p); - break; - } - - case DATA_TYPE_NVLIST: - printf("\n"); - nvlist_print(p, indent + 1); - break; - - case DATA_TYPE_NVLIST_ARRAY: - for (j = 0; j < elements; j++) { - printf("[%d]\n", j); - p = nvlist_print(p, indent + 1); - if (j != elements - 1) { - for (i = 0; i < indent; i++) - printf(" "); - printf("%s %.*s", typenames[pairtype], - namelen, pairname); - } - } - break; - - default: - printf("\n"); - } - - p = pair + encoded_size; - - pair = p; - xdr_int(&p, &encoded_size); - xdr_int(&p, &decoded_size); - } - - return (p); -} - -#endif - static int vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t size) { size_t psize; int rc; if (!vdev->v_phys_read) return (EIO); if (bp) { psize = BP_GET_PSIZE(bp); } else { psize = size; } rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize); if (rc == 0) { if (bp != NULL) rc = zio_checksum_verify(vdev->v_spa, bp, buf); } return (rc); } typedef struct remap_segment { vdev_t *rs_vd; uint64_t rs_offset; uint64_t rs_asize; uint64_t rs_split_offset; list_node_t rs_node; } remap_segment_t; static remap_segment_t * rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset) { remap_segment_t *rs = malloc(sizeof (remap_segment_t)); if (rs != NULL) { rs->rs_vd = vd; rs->rs_offset = offset; rs->rs_asize = asize; rs->rs_split_offset = split_offset; } return (rs); } vdev_indirect_mapping_t * vdev_indirect_mapping_open(spa_t *spa, objset_phys_t 
*os, uint64_t mapping_object) { vdev_indirect_mapping_t *vim; vdev_indirect_mapping_phys_t *vim_phys; int rc; vim = calloc(1, sizeof (*vim)); if (vim == NULL) return (NULL); vim->vim_dn = calloc(1, sizeof (*vim->vim_dn)); if (vim->vim_dn == NULL) { free(vim); return (NULL); } rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn); if (rc != 0) { free(vim->vim_dn); free(vim); return (NULL); } vim->vim_spa = spa; vim->vim_phys = malloc(sizeof (*vim->vim_phys)); if (vim->vim_phys == NULL) { free(vim->vim_dn); free(vim); return (NULL); } vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn); *vim->vim_phys = *vim_phys; vim->vim_objset = os; vim->vim_object = mapping_object; vim->vim_entries = NULL; vim->vim_havecounts = (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0); return (vim); } /* * Compare an offset with an indirect mapping entry; there are three * possible scenarios: * * 1. The offset is "less than" the mapping entry; meaning the * offset is less than the source offset of the mapping entry. In * this case, there is no overlap between the offset and the * mapping entry and -1 will be returned. * * 2. The offset is "greater than" the mapping entry; meaning the * offset is greater than the mapping entry's source offset plus * the entry's size. In this case, there is no overlap between * the offset and the mapping entry and 1 will be returned. * * NOTE: If the offset is actually equal to the entry's offset * plus size, this is considered to be "greater" than the entry, * and this case applies (i.e. 1 will be returned). Thus, the * entry's "range" can be considered to be inclusive at its * start, but exclusive at its end: e.g. [src, src + size). * * 3. The last case to consider is if the offset actually falls * within the mapping entry's range. If this is the case, the * offset is considered to be "equal to" the mapping entry and * 0 will be returned. * * NOTE: If the offset is equal to the entry's source offset, * this case applies and 0 will be returned. If the offset is * equal to the entry's source plus its size, this case does * *not* apply (see "NOTE" above for scenario 2), and 1 will be * returned. */ static int dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem) { const uint64_t *key = v_key; const vdev_indirect_mapping_entry_phys_t *array_elem = v_array_elem; uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem); if (*key < src_offset) { return (-1); } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) { return (0); } else { return (1); } } /* * Return array entry. 
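 * Entries are read from the mapping object at most one data block at
 * a time and cached in vim->vim_entries.  A request whose index falls
 * inside the cached window starting at vim_entry_offset is served from
 * memory; otherwise the window is re-read beginning at the requested
 * index.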
*/ static vdev_indirect_mapping_entry_phys_t * vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index) { uint64_t size; off_t offset = 0; int rc; if (vim->vim_phys->vimp_num_entries == 0) return (NULL); if (vim->vim_entries == NULL) { uint64_t bsize; bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT; size = vim->vim_phys->vimp_num_entries * sizeof (*vim->vim_entries); if (size > bsize) { size = bsize / sizeof (*vim->vim_entries); size *= sizeof (*vim->vim_entries); } vim->vim_entries = malloc(size); if (vim->vim_entries == NULL) return (NULL); vim->vim_num_entries = size / sizeof (*vim->vim_entries); offset = index * sizeof (*vim->vim_entries); } /* We have data in vim_entries */ if (offset == 0) { if (index >= vim->vim_entry_offset && index <= vim->vim_entry_offset + vim->vim_num_entries) { index -= vim->vim_entry_offset; return (&vim->vim_entries[index]); } offset = index * sizeof (*vim->vim_entries); } vim->vim_entry_offset = index; size = vim->vim_num_entries * sizeof (*vim->vim_entries); rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries, size); if (rc != 0) { /* Read error, invalidate vim_entries. */ free(vim->vim_entries); vim->vim_entries = NULL; return (NULL); } index -= vim->vim_entry_offset; return (&vim->vim_entries[index]); } /* * Returns the mapping entry for the given offset. * * It's possible that the given offset will not be in the mapping table * (i.e. no mapping entries contain this offset), in which case, the * return value value depends on the "next_if_missing" parameter. * * If the offset is not found in the table and "next_if_missing" is * B_FALSE, then NULL will always be returned. The behavior is intended * to allow consumers to get the entry corresponding to the offset * parameter, iff the offset overlaps with an entry in the table. * * If the offset is not found in the table and "next_if_missing" is * B_TRUE, then the entry nearest to the given offset will be returned, * such that the entry's source offset is greater than the offset * passed in (i.e. the "next" mapping entry in the table is returned, if * the offset is missing from the table). If there are no entries whose * source offset is greater than the passed in offset, NULL is returned. */ static vdev_indirect_mapping_entry_phys_t * vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim, uint64_t offset) { ASSERT(vim->vim_phys->vimp_num_entries > 0); vdev_indirect_mapping_entry_phys_t *entry; uint64_t last = vim->vim_phys->vimp_num_entries - 1; uint64_t base = 0; /* * We don't define these inside of the while loop because we use * their value in the case that offset isn't in the mapping. */ uint64_t mid; int result; while (last >= base) { mid = base + ((last - base) >> 1); entry = vdev_indirect_mapping_entry(vim, mid); if (entry == NULL) break; result = dva_mapping_overlap_compare(&offset, entry); if (result == 0) { break; } else if (result < 0) { last = mid - 1; } else { base = mid + 1; } } return (entry); } /* * Given an indirect vdev and an extent on that vdev, it duplicates the * physical entries of the indirect mapping that correspond to the extent * to a new array and returns a pointer to it. In addition, copied_entries * is populated with the number of mapping entries that were duplicated. * * Finally, since we are doing an allocation, it is up to the caller to * free the array allocated in this function. 
*/ vdev_indirect_mapping_entry_phys_t * vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t *copied_entries) { vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL; vdev_indirect_mapping_t *vim = vd->v_mapping; uint64_t entries = 0; vdev_indirect_mapping_entry_phys_t *first_mapping = vdev_indirect_mapping_entry_for_offset(vim, offset); ASSERT3P(first_mapping, !=, NULL); vdev_indirect_mapping_entry_phys_t *m = first_mapping; while (asize > 0) { uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m); uint64_t inner_size = MIN(asize, size - inner_offset); offset += inner_size; asize -= inner_size; entries++; m++; } size_t copy_length = entries * sizeof (*first_mapping); duplicate_mappings = malloc(copy_length); if (duplicate_mappings != NULL) bcopy(first_mapping, duplicate_mappings, copy_length); else entries = 0; *copied_entries = entries; return (duplicate_mappings); } static vdev_t * vdev_lookup_top(spa_t *spa, uint64_t vdev) { vdev_t *rvd; vdev_list_t *vlist; vlist = &spa->spa_root_vdev->v_children; STAILQ_FOREACH(rvd, vlist, v_childlink) if (rvd->v_id == vdev) break; return (rvd); } /* * This is a callback for vdev_indirect_remap() which allocates an * indirect_split_t for each split segment and adds it to iv_splits. */ static void vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset, uint64_t size, void *arg) { int n = 1; zio_t *zio = arg; indirect_vsd_t *iv = zio->io_vsd; if (vd->v_read == vdev_indirect_read) return; if (vd->v_read == vdev_mirror_read) n = vd->v_nchildren; indirect_split_t *is = malloc(offsetof(indirect_split_t, is_child[n])); if (is == NULL) { zio->io_error = ENOMEM; return; } bzero(is, offsetof(indirect_split_t, is_child[n])); is->is_children = n; is->is_size = size; is->is_split_offset = split_offset; is->is_target_offset = offset; is->is_vdev = vd; /* * Note that we only consider multiple copies of the data for * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even * though they use the same ops as mirror, because there's only one * "good" copy under the replacing/spare. 
*/ if (vd->v_read == vdev_mirror_read) { int i = 0; vdev_t *kid; STAILQ_FOREACH(kid, &vd->v_children, v_childlink) { is->is_child[i++].ic_vdev = kid; } } else { is->is_child[0].ic_vdev = vd; } list_insert_tail(&iv->iv_splits, is); } static void vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg) { list_t stack; spa_t *spa = vd->v_spa; zio_t *zio = arg; remap_segment_t *rs; list_create(&stack, sizeof (remap_segment_t), offsetof(remap_segment_t, rs_node)); rs = rs_alloc(vd, offset, asize, 0); if (rs == NULL) { printf("vdev_indirect_remap: out of memory.\n"); zio->io_error = ENOMEM; } for (; rs != NULL; rs = list_remove_head(&stack)) { vdev_t *v = rs->rs_vd; uint64_t num_entries = 0; /* vdev_indirect_mapping_t *vim = v->v_mapping; */ vdev_indirect_mapping_entry_phys_t *mapping = vdev_indirect_mapping_duplicate_adjacent_entries(v, rs->rs_offset, rs->rs_asize, &num_entries); if (num_entries == 0) zio->io_error = ENOMEM; for (uint64_t i = 0; i < num_entries; i++) { vdev_indirect_mapping_entry_phys_t *m = &mapping[i]; uint64_t size = DVA_GET_ASIZE(&m->vimep_dst); uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst); uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst); uint64_t inner_offset = rs->rs_offset - DVA_MAPPING_GET_SRC_OFFSET(m); uint64_t inner_size = MIN(rs->rs_asize, size - inner_offset); vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev); if (dst_v->v_read == vdev_indirect_read) { remap_segment_t *o; o = rs_alloc(dst_v, dst_offset + inner_offset, inner_size, rs->rs_split_offset); if (o == NULL) { printf("vdev_indirect_remap: " "out of memory.\n"); zio->io_error = ENOMEM; break; } list_insert_head(&stack, o); } vdev_indirect_gather_splits(rs->rs_split_offset, dst_v, dst_offset + inner_offset, inner_size, arg); /* * vdev_indirect_gather_splits can have memory * allocation error, we can not recover from it. */ if (zio->io_error != 0) break; rs->rs_offset += inner_size; rs->rs_asize -= inner_size; rs->rs_split_offset += inner_size; } free(mapping); free(rs); if (zio->io_error != 0) break; } list_destroy(&stack); } static void vdev_indirect_map_free(zio_t *zio) { indirect_vsd_t *iv = zio->io_vsd; indirect_split_t *is; while ((is = list_head(&iv->iv_splits)) != NULL) { for (int c = 0; c < is->is_children; c++) { indirect_child_t *ic = &is->is_child[c]; free(ic->ic_data); } list_remove(&iv->iv_splits, is); free(is); } free(iv); } static int vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t bytes) { zio_t zio; spa_t *spa = vdev->v_spa; indirect_vsd_t *iv; indirect_split_t *first; int rc = EIO; iv = calloc(1, sizeof(*iv)); if (iv == NULL) return (ENOMEM); list_create(&iv->iv_splits, sizeof (indirect_split_t), offsetof(indirect_split_t, is_node)); bzero(&zio, sizeof(zio)); zio.io_spa = spa; zio.io_bp = (blkptr_t *)bp; zio.io_data = buf; zio.io_size = bytes; zio.io_offset = offset; zio.io_vd = vdev; zio.io_vsd = iv; if (vdev->v_mapping == NULL) { vdev_indirect_config_t *vic; vic = &vdev->vdev_indirect_config; vdev->v_mapping = vdev_indirect_mapping_open(spa, &spa->spa_mos, vic->vic_mapping_object); } vdev_indirect_remap(vdev, offset, bytes, &zio); if (zio.io_error != 0) return (zio.io_error); first = list_head(&iv->iv_splits); if (first->is_size == zio.io_size) { /* * This is not a split block; we are pointing to the entire * data, which will checksum the same as the original data. * Pass the BP down so that the child i/o can verify the * checksum, and try a different location if available * (e.g. on a mirror). 
* * While this special case could be handled the same as the * general (split block) case, doing it this way ensures * that the vast majority of blocks on indirect vdevs * (which are not split) are handled identically to blocks * on non-indirect vdevs. This allows us to be less strict * about performance in the general (but rare) case. */ rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp, zio.io_data, first->is_target_offset, bytes); } else { iv->iv_split_block = B_TRUE; /* * Read one copy of each split segment, from the * top-level vdev. Since we don't know the * checksum of each split individually, the child * zio can't ensure that we get the right data. * E.g. if it's a mirror, it will just read from a * random (healthy) leaf vdev. We have to verify * the checksum in vdev_indirect_io_done(). */ for (indirect_split_t *is = list_head(&iv->iv_splits); is != NULL; is = list_next(&iv->iv_splits, is)) { char *ptr = zio.io_data; rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp, ptr + is->is_split_offset, is->is_target_offset, is->is_size); } if (zio_checksum_verify(spa, zio.io_bp, zio.io_data)) rc = ECKSUM; else rc = 0; } vdev_indirect_map_free(&zio); if (rc == 0) rc = zio.io_error; return (rc); } static int vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t bytes) { return (vdev_read_phys(vdev, bp, buf, offset + VDEV_LABEL_START_SIZE, bytes)); } static int vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t bytes) { vdev_t *kid; int rc; rc = EIO; STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { if (kid->v_state != VDEV_STATE_HEALTHY) continue; rc = kid->v_read(kid, bp, buf, offset, bytes); if (!rc) return (0); } return (rc); } static int vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t bytes) { vdev_t *kid; /* * Here we should have two kids: * First one which is the one we are replacing and we can trust * only this one to have valid data, but it might not be present. * Second one is that one we are replacing with. It is most likely * healthy, but we can't trust it has needed data, so we won't use it. */ kid = STAILQ_FIRST(&vdev->v_children); if (kid == NULL) return (EIO); if (kid->v_state != VDEV_STATE_HEALTHY) return (EIO); return (kid->v_read(kid, bp, buf, offset, bytes)); } static vdev_t * vdev_find(uint64_t guid) { vdev_t *vdev; STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink) if (vdev->v_guid == guid) return (vdev); return (0); } static vdev_t * vdev_create(uint64_t guid, vdev_read_t *_read) { vdev_t *vdev; vdev_indirect_config_t *vic; vdev = calloc(1, sizeof(vdev_t)); if (vdev != NULL) { STAILQ_INIT(&vdev->v_children); vdev->v_guid = guid; vdev->v_read = _read; /* * root vdev has no read function, we use this fact to * skip setting up data we do not need for root vdev. * We only point root vdev from spa. 
*/ if (_read != NULL) { vic = &vdev->vdev_indirect_config; vic->vic_prev_indirect_vdev = UINT64_MAX; STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink); } } return (vdev); } static void -vdev_set_initial_state(vdev_t *vdev, const unsigned char *nvlist) +vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist) { uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present; uint64_t is_log; is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0; is_log = 0; (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL, &is_offline, NULL); (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL, &is_removed, NULL); (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL, &is_faulted, NULL); (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64, NULL, &is_degraded, NULL); (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64, NULL, &isnt_present, NULL); (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL, &is_log, NULL); if (is_offline != 0) vdev->v_state = VDEV_STATE_OFFLINE; else if (is_removed != 0) vdev->v_state = VDEV_STATE_REMOVED; else if (is_faulted != 0) vdev->v_state = VDEV_STATE_FAULTED; else if (is_degraded != 0) vdev->v_state = VDEV_STATE_DEGRADED; else if (isnt_present != 0) vdev->v_state = VDEV_STATE_CANT_OPEN; vdev->v_islog = is_log != 0; } static int -vdev_init(uint64_t guid, const unsigned char *nvlist, vdev_t **vdevp) +vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp) { uint64_t id, ashift, asize, nparity; const char *path; const char *type; int len, pathlen; char *name; vdev_t *vdev; if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id, NULL) || - nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, - NULL, &type, &len)) { + nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL, + &type, &len)) { return (ENOENT); } if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 && memcmp(type, VDEV_TYPE_DISK, len) != 0 && #ifdef ZFS_TEST memcmp(type, VDEV_TYPE_FILE, len) != 0 && #endif memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 && memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 && memcmp(type, VDEV_TYPE_REPLACING, len) != 0) { printf("ZFS: can only boot from disk, mirror, raidz1, " "raidz2 and raidz3 vdevs\n"); return (EIO); } if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0) vdev = vdev_create(guid, vdev_mirror_read); else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) vdev = vdev_create(guid, vdev_raidz_read); else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0) vdev = vdev_create(guid, vdev_replacing_read); else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) { vdev_indirect_config_t *vic; vdev = vdev_create(guid, vdev_indirect_read); if (vdev != NULL) { vdev->v_state = VDEV_STATE_HEALTHY; vic = &vdev->vdev_indirect_config; nvlist_find(nvlist, ZPOOL_CONFIG_INDIRECT_OBJECT, DATA_TYPE_UINT64, NULL, &vic->vic_mapping_object, NULL); nvlist_find(nvlist, ZPOOL_CONFIG_INDIRECT_BIRTHS, DATA_TYPE_UINT64, NULL, &vic->vic_births_object, NULL); nvlist_find(nvlist, ZPOOL_CONFIG_PREV_INDIRECT_VDEV, DATA_TYPE_UINT64, NULL, &vic->vic_prev_indirect_vdev, NULL); } } else { vdev = vdev_create(guid, vdev_disk_read); } if (vdev == NULL) return (ENOMEM); vdev_set_initial_state(vdev, nvlist); vdev->v_id = id; if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT, DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0) vdev->v_ashift = ashift; if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE, DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) { vdev->v_psize = asize + VDEV_LABEL_START_SIZE + 
VDEV_LABEL_END_SIZE; } if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY, DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0) vdev->v_nparity = nparity; if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH, DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) { char prefix[] = "/dev/"; len = strlen(prefix); if (len < pathlen && memcmp(path, prefix, len) == 0) { path += len; pathlen -= len; } name = malloc(pathlen + 1); bcopy(path, name, pathlen); name[pathlen] = '\0'; vdev->v_name = name; } else { name = NULL; if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) { if (vdev->v_nparity < 1 || vdev->v_nparity > 3) { printf("ZFS: invalid raidz parity: %d\n", vdev->v_nparity); return (EIO); } (void) asprintf(&name, "%.*s%d-%" PRIu64, len, type, vdev->v_nparity, id); } else { (void) asprintf(&name, "%.*s-%" PRIu64, len, type, id); } vdev->v_name = name; } *vdevp = vdev; return (0); } /* * Find slot for vdev. We return either NULL to signal to use * STAILQ_INSERT_HEAD, or we return link element to be used with * STAILQ_INSERT_AFTER. */ static vdev_t * vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev) { vdev_t *v, *previous; if (STAILQ_EMPTY(&top_vdev->v_children)) return (NULL); previous = NULL; STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) { if (v->v_id > vdev->v_id) return (previous); if (v->v_id == vdev->v_id) return (v); if (v->v_id < vdev->v_id) previous = v; } return (previous); } static size_t vdev_child_count(vdev_t *vdev) { vdev_t *v; size_t count; count = 0; STAILQ_FOREACH(v, &vdev->v_children, v_childlink) { count++; } return (count); } /* * Insert vdev into top_vdev children list. List is ordered by v_id. */ static void vdev_insert(vdev_t *top_vdev, vdev_t *vdev) { vdev_t *previous; size_t count; /* * The top level vdev can appear in random order, depending how * the firmware is presenting the disk devices. * However, we will insert vdev to create list ordered by v_id, * so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER * as STAILQ does not have insert before. */ previous = vdev_find_previous(top_vdev, vdev); if (previous == NULL) { STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink); } else if (previous->v_id == vdev->v_id) { /* * This vdev was configured from label config, * do not insert duplicate. */ return; } else { STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev, v_childlink); } count = vdev_child_count(top_vdev); if (top_vdev->v_nchildren < count) top_vdev->v_nchildren = count; } static int -vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const unsigned char *nvlist) +vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const nvlist_t *nvlist) { vdev_t *top_vdev, *vdev; - const unsigned char *kids; + nvlist_t *kids = NULL; int rc, nkids; /* Get top vdev. */ top_vdev = vdev_find(top_guid); if (top_vdev == NULL) { rc = vdev_init(top_guid, nvlist, &top_vdev); if (rc != 0) return (rc); top_vdev->v_spa = spa; top_vdev->v_top = top_vdev; vdev_insert(spa->spa_root_vdev, top_vdev); } /* Add children if there are any. 
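 * ZPOOL_CONFIG_CHILDREN is an nvlist array; each element describes one
 * child vdev and the array is walked with nvlist_next(), which appears
 * to advance the kids handle in place.  Children that are not already
 * known are created with vdev_init() and inserted under top_vdev in
 * v_id order.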
*/ rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL); if (rc == 0) { for (int i = 0; i < nkids; i++) { uint64_t guid; rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, NULL, &guid, NULL); - if (rc != 0) + if (rc != 0) { + nvlist_destroy(kids); return (rc); + } rc = vdev_init(guid, kids, &vdev); if (rc != 0) return (rc); vdev->v_spa = spa; vdev->v_top = top_vdev; vdev_insert(top_vdev, vdev); - kids = nvlist_next(kids); + rc = nvlist_next(kids); } } else { /* * When there are no children, nvlist_find() does return * error, reset it because leaf devices have no children. */ rc = 0; } + nvlist_destroy(kids); return (rc); } static int -vdev_init_from_label(spa_t *spa, const unsigned char *nvlist) +vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist) { uint64_t pool_guid, top_guid; - const unsigned char *vdevs; + nvlist_t *vdevs; + int rc; if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, NULL, &pool_guid, NULL) || nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64, NULL, &top_guid, NULL) || nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL, &vdevs, NULL)) { printf("ZFS: can't find vdev details\n"); return (ENOENT); } - return (vdev_from_nvlist(spa, top_guid, vdevs)); + rc = vdev_from_nvlist(spa, top_guid, vdevs); + nvlist_destroy(vdevs); + return (rc); } static void vdev_set_state(vdev_t *vdev) { vdev_t *kid; int good_kids; int bad_kids; STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { vdev_set_state(kid); } /* * A mirror or raidz is healthy if all its kids are healthy. A * mirror is degraded if any of its kids is healthy; a raidz * is degraded if at most nparity kids are offline. */ if (STAILQ_FIRST(&vdev->v_children)) { good_kids = 0; bad_kids = 0; STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { if (kid->v_state == VDEV_STATE_HEALTHY) good_kids++; else bad_kids++; } if (bad_kids == 0) { vdev->v_state = VDEV_STATE_HEALTHY; } else { if (vdev->v_read == vdev_mirror_read) { if (good_kids) { vdev->v_state = VDEV_STATE_DEGRADED; } else { vdev->v_state = VDEV_STATE_OFFLINE; } } else if (vdev->v_read == vdev_raidz_read) { if (bad_kids > vdev->v_nparity) { vdev->v_state = VDEV_STATE_OFFLINE; } else { vdev->v_state = VDEV_STATE_DEGRADED; } } } } } static int -vdev_update_from_nvlist(uint64_t top_guid, const unsigned char *nvlist) +vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist) { vdev_t *vdev; - const unsigned char *kids; + nvlist_t *kids = NULL; int rc, nkids; /* Update top vdev. */ vdev = vdev_find(top_guid); if (vdev != NULL) vdev_set_initial_state(vdev, nvlist); /* Update children if there are any. 
*/ rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL); if (rc == 0) { for (int i = 0; i < nkids; i++) { uint64_t guid; rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, NULL, &guid, NULL); if (rc != 0) break; vdev = vdev_find(guid); if (vdev != NULL) vdev_set_initial_state(vdev, kids); - kids = nvlist_next(kids); + rc = nvlist_next(kids); } } else { rc = 0; } + nvlist_destroy(kids); return (rc); } static int -vdev_init_from_nvlist(spa_t *spa, const unsigned char *nvlist) +vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist) { uint64_t pool_guid, vdev_children; - const unsigned char *vdevs, *kids; + nvlist_t *vdevs = NULL, *kids = NULL; int rc, nkids; if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, NULL, &pool_guid, NULL) || nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64, NULL, &vdev_children, NULL) || nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL, &vdevs, NULL)) { printf("ZFS: can't find vdev details\n"); return (ENOENT); } /* Wrong guid?! */ - if (spa->spa_guid != pool_guid) + if (spa->spa_guid != pool_guid) { + nvlist_destroy(vdevs); return (EINVAL); + } spa->spa_root_vdev->v_nchildren = vdev_children; rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL); + nvlist_destroy(vdevs); /* * MOS config has at least one child for root vdev. */ if (rc != 0) return (rc); for (int i = 0; i < nkids; i++) { uint64_t guid; vdev_t *vdev; rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, NULL, &guid, NULL); if (rc != 0) break; vdev = vdev_find(guid); /* * Top level vdev is missing, create it. */ if (vdev == NULL) rc = vdev_from_nvlist(spa, guid, kids); else rc = vdev_update_from_nvlist(guid, kids); if (rc != 0) break; - kids = nvlist_next(kids); + nvlist_next(kids); } + nvlist_destroy(kids); /* * Re-evaluate top-level vdev state. */ vdev_set_state(spa->spa_root_vdev); return (rc); } static spa_t * spa_find_by_guid(uint64_t guid) { spa_t *spa; STAILQ_FOREACH(spa, &zfs_pools, spa_link) if (spa->spa_guid == guid) return (spa); return (NULL); } static spa_t * spa_find_by_name(const char *name) { spa_t *spa; STAILQ_FOREACH(spa, &zfs_pools, spa_link) if (strcmp(spa->spa_name, name) == 0) return (spa); return (NULL); } #ifdef BOOT2 static spa_t * spa_get_primary(void) { return (STAILQ_FIRST(&zfs_pools)); } static vdev_t * spa_get_primary_vdev(const spa_t *spa) { vdev_t *vdev; vdev_t *kid; if (spa == NULL) spa = spa_get_primary(); if (spa == NULL) return (NULL); vdev = spa->spa_root_vdev; if (vdev == NULL) return (NULL); for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL; kid = STAILQ_FIRST(&vdev->v_children)) vdev = kid; return (vdev); } #endif static spa_t * spa_create(uint64_t guid, const char *name) { spa_t *spa; if ((spa = calloc(1, sizeof(spa_t))) == NULL) return (NULL); if ((spa->spa_name = strdup(name)) == NULL) { free(spa); return (NULL); } spa->spa_guid = guid; spa->spa_root_vdev = vdev_create(guid, NULL); if (spa->spa_root_vdev == NULL) { free(spa->spa_name); free(spa); return (NULL); } spa->spa_root_vdev->v_name = strdup("root"); STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link); return (spa); } static const char * state_name(vdev_state_t state) { static const char *names[] = { "UNKNOWN", "CLOSED", "OFFLINE", "REMOVED", "CANT_OPEN", "FAULTED", "DEGRADED", "ONLINE" }; return (names[state]); } #ifdef BOOT2 #define pager_printf printf #else static int pager_printf(const char *fmt, ...) 
{ char line[80]; va_list args; va_start(args, fmt); vsnprintf(line, sizeof(line), fmt, args); va_end(args); return (pager_output(line)); } #endif #define STATUS_FORMAT " %s %s\n" static int print_state(int indent, const char *name, vdev_state_t state) { int i; char buf[512]; buf[0] = 0; for (i = 0; i < indent; i++) strcat(buf, " "); strcat(buf, name); return (pager_printf(STATUS_FORMAT, buf, state_name(state))); } static int vdev_status(vdev_t *vdev, int indent) { vdev_t *kid; int ret; if (vdev->v_islog) { (void) pager_output(" logs\n"); indent++; } ret = print_state(indent, vdev->v_name, vdev->v_state); if (ret != 0) return (ret); STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) { ret = vdev_status(kid, indent + 1); if (ret != 0) return (ret); } return (ret); } static int spa_status(spa_t *spa) { static char bootfs[ZFS_MAXNAMELEN]; uint64_t rootid; vdev_list_t *vlist; vdev_t *vdev; int good_kids, bad_kids, degraded_kids, ret; vdev_state_t state; ret = pager_printf(" pool: %s\n", spa->spa_name); if (ret != 0) return (ret); if (zfs_get_root(spa, &rootid) == 0 && zfs_rlookup(spa, rootid, bootfs) == 0) { if (bootfs[0] == '\0') ret = pager_printf("bootfs: %s\n", spa->spa_name); else ret = pager_printf("bootfs: %s/%s\n", spa->spa_name, bootfs); if (ret != 0) return (ret); } ret = pager_printf("config:\n\n"); if (ret != 0) return (ret); ret = pager_printf(STATUS_FORMAT, "NAME", "STATE"); if (ret != 0) return (ret); good_kids = 0; degraded_kids = 0; bad_kids = 0; vlist = &spa->spa_root_vdev->v_children; STAILQ_FOREACH(vdev, vlist, v_childlink) { if (vdev->v_state == VDEV_STATE_HEALTHY) good_kids++; else if (vdev->v_state == VDEV_STATE_DEGRADED) degraded_kids++; else bad_kids++; } state = VDEV_STATE_CLOSED; if (good_kids > 0 && (degraded_kids + bad_kids) == 0) state = VDEV_STATE_HEALTHY; else if ((good_kids + degraded_kids) > 0) state = VDEV_STATE_DEGRADED; ret = print_state(0, spa->spa_name, state); if (ret != 0) return (ret); STAILQ_FOREACH(vdev, vlist, v_childlink) { ret = vdev_status(vdev, 1); if (ret != 0) return (ret); } return (ret); } static int spa_all_status(void) { spa_t *spa; int first = 1, ret = 0; STAILQ_FOREACH(spa, &zfs_pools, spa_link) { if (!first) { ret = pager_printf("\n"); if (ret != 0) return (ret); } first = 0; ret = spa_status(spa); if (ret != 0) return (ret); } return (ret); } static uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset) { uint64_t label_offset; if (l < VDEV_LABELS / 2) label_offset = 0; else label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t); return (offset + l * sizeof (vdev_label_t) + label_offset); } static int vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2) { unsigned int seq1 = 0; unsigned int seq2 = 0; int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg); if (cmp != 0) return (cmp); cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp); if (cmp != 0) return (cmp); if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1)) seq1 = MMP_SEQ(ub1); if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2)) seq2 = MMP_SEQ(ub2); return (AVL_CMP(seq1, seq2)); } static int uberblock_verify(uberblock_t *ub) { if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) { byteswap_uint64_array(ub, sizeof (uberblock_t)); } if (ub->ub_magic != UBERBLOCK_MAGIC || !SPA_VERSION_IS_SUPPORTED(ub->ub_version)) return (EINVAL); return (0); } static int vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset, size_t size) { blkptr_t bp; off_t off; off = vdev_label_offset(vd->v_psize, l, offset); BP_ZERO(&bp); BP_SET_LSIZE(&bp, size); BP_SET_PSIZE(&bp, size); 
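	/*
	 * The blkptr built here is synthetic; it exists only so that
	 * vdev_read_phys() can checksum-verify the label region.  The
	 * sizes are the read size, the checksum type is
	 * ZIO_CHECKSUM_LABEL and blk_cksum carries the on-disk offset,
	 * which serves as the embedded-checksum verifier.
	 */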
BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL); BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); DVA_SET_OFFSET(BP_IDENTITY(&bp), off); ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0); return (vdev_read_phys(vd, &bp, buf, off, size)); } -static unsigned char * +static nvlist_t * vdev_label_read_config(vdev_t *vd, uint64_t txg) { vdev_phys_t *label; uint64_t best_txg = 0; uint64_t label_txg = 0; uint64_t asize; - unsigned char *nvl; - size_t nvl_size; + nvlist_t *nvl = NULL, *tmp; int error; label = malloc(sizeof (vdev_phys_t)); if (label == NULL) return (NULL); - nvl_size = VDEV_PHYS_SIZE - sizeof (zio_eck_t) - 4; - nvl = malloc(nvl_size); - if (nvl == NULL) - goto done; - for (int l = 0; l < VDEV_LABELS; l++) { const unsigned char *nvlist; if (vdev_label_read(vd, l, label, offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t))) continue; - if (label->vp_nvlist[0] != NV_ENCODE_XDR) + nvlist = (const unsigned char *) label->vp_nvlist; + tmp = nvlist_import(nvlist + 4, nvlist[0], nvlist[1]); + if (tmp == NULL) continue; - nvlist = (const unsigned char *) label->vp_nvlist + 4; - error = nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG, + error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64, NULL, &label_txg, NULL); if (error != 0 || label_txg == 0) { - memcpy(nvl, nvlist, nvl_size); + nvlist_destroy(nvl); + nvl = tmp; goto done; } if (label_txg <= txg && label_txg > best_txg) { best_txg = label_txg; - memcpy(nvl, nvlist, nvl_size); + nvlist_destroy(nvl); + nvl = tmp; + tmp = NULL; /* * Use asize from pool config. We need this * because we can get bad value from BIOS. */ - if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE, + if (nvlist_find(nvl, ZPOOL_CONFIG_ASIZE, DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) { vd->v_psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE; } } + nvlist_destroy(tmp); } if (best_txg == 0) { - free(nvl); + nvlist_destroy(nvl); nvl = NULL; } done: free(label); return (nvl); } static void vdev_uberblock_load(vdev_t *vd, uberblock_t *ub) { uberblock_t *buf; buf = malloc(VDEV_UBERBLOCK_SIZE(vd)); if (buf == NULL) return; for (int l = 0; l < VDEV_LABELS; l++) { for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { if (vdev_label_read(vd, l, buf, VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd))) continue; if (uberblock_verify(buf) != 0) continue; if (vdev_uberblock_compare(buf, ub) > 0) *ub = *buf; } } free(buf); } static int vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap) { vdev_t vtmp; spa_t *spa; vdev_t *vdev; - unsigned char *nvlist; + nvlist_t *nvl; uint64_t val; uint64_t guid, vdev_children; uint64_t pool_txg, pool_guid; const char *pool_name; - const unsigned char *features; int rc, namelen; /* * Load the vdev label and figure out which * uberblock is most current. */ memset(&vtmp, 0, sizeof(vtmp)); vtmp.v_phys_read = _read; vtmp.v_read_priv = read_priv; vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv), (uint64_t)sizeof (vdev_label_t)); /* Test for minimum device size. 
*/ if (vtmp.v_psize < SPA_MINDEVSIZE) return (EIO); - nvlist = vdev_label_read_config(&vtmp, UINT64_MAX); - if (nvlist == NULL) + nvl = vdev_label_read_config(&vtmp, UINT64_MAX); + if (nvl == NULL) return (EIO); - if (nvlist_find(nvlist, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64, + if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64, NULL, &val, NULL) != 0) { - free(nvlist); + nvlist_destroy(nvl); return (EIO); } if (!SPA_VERSION_IS_SUPPORTED(val)) { printf("ZFS: unsupported ZFS version %u (should be %u)\n", (unsigned)val, (unsigned)SPA_VERSION); - free(nvlist); + nvlist_destroy(nvl); return (EIO); } /* Check ZFS features for read */ - if (nvlist_find(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ, - DATA_TYPE_NVLIST, NULL, &features, NULL) == 0 && - nvlist_check_features_for_read(features) != 0) { - free(nvlist); + rc = nvlist_check_features_for_read(nvl); + if (rc != 0) { + nvlist_destroy(nvl); return (EIO); } - if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64, + if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64, NULL, &val, NULL) != 0) { - free(nvlist); + nvlist_destroy(nvl); return (EIO); } if (val == POOL_STATE_DESTROYED) { /* We don't boot only from destroyed pools. */ - free(nvlist); + nvlist_destroy(nvl); return (EIO); } - if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64, + if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64, NULL, &pool_txg, NULL) != 0 || - nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, + nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64, NULL, &pool_guid, NULL) != 0 || - nvlist_find(nvlist, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING, + nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING, NULL, &pool_name, &namelen) != 0) { /* * Cache and spare devices end up here - just ignore * them. */ - free(nvlist); + nvlist_destroy(nvl); return (EIO); } /* * Create the pool if this is the first time we've seen it. */ spa = spa_find_by_guid(pool_guid); if (spa == NULL) { char *name; - nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, + nvlist_find(nvl, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64, NULL, &vdev_children, NULL); name = malloc(namelen + 1); if (name == NULL) { - free(nvlist); + nvlist_destroy(nvl); return (ENOMEM); } bcopy(pool_name, name, namelen); name[namelen] = '\0'; spa = spa_create(pool_guid, name); free(name); if (spa == NULL) { - free(nvlist); + nvlist_destroy(nvl); return (ENOMEM); } spa->spa_root_vdev->v_nchildren = vdev_children; } if (pool_txg > spa->spa_txg) spa->spa_txg = pool_txg; /* * Get the vdev tree and create our in-core copy of it. * If we already have a vdev with this guid, this must * be some kind of alias (overlapping slices, dangerously dedicated * disks etc). */ - if (nvlist_find(nvlist, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, + if (nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64, NULL, &guid, NULL) != 0) { - free(nvlist); + nvlist_destroy(nvl); return (EIO); } vdev = vdev_find(guid); /* Has this vdev already been inited? */ if (vdev && vdev->v_phys_read) { - free(nvlist); + nvlist_destroy(nvl); return (EIO); } - rc = vdev_init_from_label(spa, nvlist); - free(nvlist); + rc = vdev_init_from_label(spa, nvl); + nvlist_destroy(nvl); if (rc != 0) return (rc); /* * We should already have created an incomplete vdev for this * vdev. Find it and initialise it with our read proc. */ vdev = vdev_find(guid); if (vdev != NULL) { vdev->v_phys_read = _read; vdev->v_read_priv = read_priv; vdev->v_psize = vtmp.v_psize; /* * If no other state is set, mark vdev healthy. 
*/ if (vdev->v_state == VDEV_STATE_UNKNOWN) vdev->v_state = VDEV_STATE_HEALTHY; } else { printf("ZFS: inconsistent nvlist contents\n"); return (EIO); } if (vdev->v_islog) spa->spa_with_log = vdev->v_islog; /* * Re-evaluate top-level vdev state. */ vdev_set_state(vdev->v_top); /* * Ok, we are happy with the pool so far. Lets find * the best uberblock and then we can actually access * the contents of the pool. */ vdev_uberblock_load(vdev, &spa->spa_uberblock); if (spap != NULL) *spap = spa; return (0); } static int ilog2(int n) { int v; for (v = 0; v < 32; v++) if (n == (1 << v)) return (v); return (-1); } static int zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf) { blkptr_t gbh_bp; zio_gbh_phys_t zio_gb; char *pbuf; int i; /* Artificial BP for gang block header. */ gbh_bp = *bp; BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE); BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER); BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF); for (i = 0; i < SPA_DVAS_PER_BP; i++) DVA_SET_GANG(&gbh_bp.blk_dva[i], 0); /* Read gang header block using the artificial BP. */ if (zio_read(spa, &gbh_bp, &zio_gb)) return (EIO); pbuf = buf; for (i = 0; i < SPA_GBH_NBLKPTRS; i++) { blkptr_t *gbp = &zio_gb.zg_blkptr[i]; if (BP_IS_HOLE(gbp)) continue; if (zio_read(spa, gbp, pbuf)) return (EIO); pbuf += BP_GET_PSIZE(gbp); } if (zio_checksum_verify(spa, bp, buf)) return (EIO); return (0); } static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf) { int cpfunc = BP_GET_COMPRESS(bp); uint64_t align, size; void *pbuf; int i, error; /* * Process data embedded in block pointer */ if (BP_IS_EMBEDDED(bp)) { ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); size = BPE_GET_PSIZE(bp); ASSERT(size <= BPE_PAYLOAD_SIZE); if (cpfunc != ZIO_COMPRESS_OFF) pbuf = malloc(size); else pbuf = buf; if (pbuf == NULL) return (ENOMEM); decode_embedded_bp_compressed(bp, pbuf); error = 0; if (cpfunc != ZIO_COMPRESS_OFF) { error = zio_decompress_data(cpfunc, pbuf, size, buf, BP_GET_LSIZE(bp)); free(pbuf); } if (error != 0) printf("ZFS: i/o error - unable to decompress " "block pointer data, error %d\n", error); return (error); } error = EIO; for (i = 0; i < SPA_DVAS_PER_BP; i++) { const dva_t *dva = &bp->blk_dva[i]; vdev_t *vdev; vdev_list_t *vlist; uint64_t vdevid; off_t offset; if (!dva->dva_word[0] && !dva->dva_word[1]) continue; vdevid = DVA_GET_VDEV(dva); offset = DVA_GET_OFFSET(dva); vlist = &spa->spa_root_vdev->v_children; STAILQ_FOREACH(vdev, vlist, v_childlink) { if (vdev->v_id == vdevid) break; } if (!vdev || !vdev->v_read) continue; size = BP_GET_PSIZE(bp); if (vdev->v_read == vdev_raidz_read) { align = 1ULL << vdev->v_ashift; if (P2PHASE(size, align) != 0) size = P2ROUNDUP(size, align); } if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF) pbuf = malloc(size); else pbuf = buf; if (pbuf == NULL) { error = ENOMEM; break; } if (DVA_GET_GANG(dva)) error = zio_read_gang(spa, bp, pbuf); else error = vdev->v_read(vdev, bp, pbuf, offset, size); if (error == 0) { if (cpfunc != ZIO_COMPRESS_OFF) error = zio_decompress_data(cpfunc, pbuf, BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp)); else if (size != BP_GET_PSIZE(bp)) bcopy(pbuf, buf, BP_GET_PSIZE(bp)); + } else { + printf("zio_read error: %d\n", error); } if (buf != pbuf) free(pbuf); if (error == 0) break; } if (error != 0) printf("ZFS: i/o error - all block copies unavailable\n"); return (error); } static int dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset, void *buf, size_t buflen) { int ibshift = 
dnode->dn_indblkshift - SPA_BLKPTRSHIFT; int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; int nlevels = dnode->dn_nlevels; int i, rc; if (bsize > SPA_MAXBLOCKSIZE) { printf("ZFS: I/O error - blocks larger than %llu are not " "supported\n", SPA_MAXBLOCKSIZE); return (EIO); } /* * Note: bsize may not be a power of two here so we need to do an * actual divide rather than a bitshift. */ while (buflen > 0) { uint64_t bn = offset / bsize; int boff = offset % bsize; int ibn; const blkptr_t *indbp; blkptr_t bp; if (bn > dnode->dn_maxblkid) return (EIO); if (dnode == dnode_cache_obj && bn == dnode_cache_bn) goto cached; indbp = dnode->dn_blkptr; for (i = 0; i < nlevels; i++) { /* * Copy the bp from the indirect array so that * we can re-use the scratch buffer for multi-level * objects. */ ibn = bn >> ((nlevels - i - 1) * ibshift); ibn &= ((1 << ibshift) - 1); bp = indbp[ibn]; if (BP_IS_HOLE(&bp)) { memset(dnode_cache_buf, 0, bsize); break; } rc = zio_read(spa, &bp, dnode_cache_buf); if (rc) return (rc); indbp = (const blkptr_t *) dnode_cache_buf; } dnode_cache_obj = dnode; dnode_cache_bn = bn; cached: /* * The buffer contains our data block. Copy what we * need from it and loop. */ i = bsize - boff; if (i > buflen) i = buflen; memcpy(buf, &dnode_cache_buf[boff], i); buf = ((char *)buf) + i; offset += i; buflen -= i; } return (0); } /* * Lookup a value in a microzap directory. */ static int mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name, uint64_t *value) { const mzap_ent_phys_t *mze; int chunks, i; /* * Microzap objects use exactly one block. Read the whole * thing. */ chunks = size / MZAP_ENT_LEN - 1; for (i = 0; i < chunks; i++) { mze = &mz->mz_chunk[i]; if (strcmp(mze->mze_name, name) == 0) { *value = mze->mze_value; return (0); } } return (ENOENT); } /* * Compare a name with a zap leaf entry. Return non-zero if the name * matches. */ static int fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, const char *name) { size_t namelen; const zap_leaf_chunk_t *nc; const char *p; namelen = zc->l_entry.le_name_numints; nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); p = name; while (namelen > 0) { size_t len; len = namelen; if (len > ZAP_LEAF_ARRAY_BYTES) len = ZAP_LEAF_ARRAY_BYTES; if (memcmp(p, nc->l_array.la_array, len)) return (0); p += len; namelen -= len; nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); } return (1); } /* * Extract a uint64_t value from a zap leaf entry. */ static uint64_t fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc) { const zap_leaf_chunk_t *vc; int i; uint64_t value; const uint8_t *p; vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk); for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) { value = (value << 8) | p[i]; } return (value); } static void stv(int len, void *addr, uint64_t value) { switch (len) { case 1: *(uint8_t *)addr = value; return; case 2: *(uint16_t *)addr = value; return; case 4: *(uint32_t *)addr = value; return; case 8: *(uint64_t *)addr = value; return; } } /* * Extract a array from a zap leaf entry. 
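dnode_read() above resolves a logical block number by walking dn_nlevels levels of block pointers, peeling off one index per level with shifts derived from dn_indblkshift. A small standalone sketch of that index arithmetic with illustrative values; it only prints the per-level indices and does not read anything.

#include <stdint.h>
#include <stdio.h>

#define	SK_SPA_BLKPTRSHIFT	7	/* a blkptr_t is 128 bytes */

int
main(void)
{
	int indblkshift = 17;		/* 128 KiB indirect blocks */
	int ibshift = indblkshift - SK_SPA_BLKPTRSHIFT;	/* 1024 blkptrs per block */
	int nlevels = 3;
	uint64_t bn = 1234567;		/* logical data block number */

	for (int i = 0; i < nlevels; i++) {
		uint64_t ibn = (bn >> ((nlevels - i - 1) * ibshift)) &
		    ((1ULL << ibshift) - 1);
		printf("step %d: block pointer index %ju\n", i,
		    (uintmax_t)ibn);
	}
	return (0);
}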
*/ static void fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, uint64_t integer_size, uint64_t num_integers, void *buf) { uint64_t array_int_len = zc->l_entry.le_value_intlen; uint64_t value = 0; uint64_t *u64 = buf; char *p = buf; int len = MIN(zc->l_entry.le_value_numints, num_integers); int chunk = zc->l_entry.le_value_chunk; int byten = 0; if (integer_size == 8 && len == 1) { *u64 = fzap_leaf_value(zl, zc); return; } while (len > 0) { struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array; int i; ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl)); for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) { value = (value << 8) | la->la_array[i]; byten++; if (byten == array_int_len) { stv(integer_size, p, value); byten = 0; len--; if (len == 0) return; p += integer_size; } } chunk = la->la_next; } } static int fzap_check_size(uint64_t integer_size, uint64_t num_integers) { switch (integer_size) { case 1: case 2: case 4: case 8: break; default: return (EINVAL); } if (integer_size * num_integers > ZAP_MAXVALUELEN) return (E2BIG); return (0); } static void zap_leaf_free(zap_leaf_t *leaf) { free(leaf->l_phys); free(leaf); } static int zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp) { int bs = FZAP_BLOCK_SHIFT(zap); int err; *lp = malloc(sizeof(**lp)); if (*lp == NULL) return (ENOMEM); (*lp)->l_bs = bs; (*lp)->l_phys = malloc(1 << bs); if ((*lp)->l_phys == NULL) { free(*lp); return (ENOMEM); } err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys, 1 << bs); if (err != 0) { zap_leaf_free(*lp); } return (err); } static int zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp) { int bs = FZAP_BLOCK_SHIFT(zap); uint64_t blk = idx >> (bs - 3); uint64_t off = idx & ((1 << (bs - 3)) - 1); uint64_t *buf; int rc; buf = malloc(1 << zap->zap_block_shift); if (buf == NULL) return (ENOMEM); rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs, buf, 1 << zap->zap_block_shift); if (rc == 0) *valp = buf[off]; free(buf); return (rc); } static int zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp) { if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) { *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx); return (0); } else { return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl, idx, valp)); } } #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n)))) static int zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp) { uint64_t idx, blk; int err; idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift); err = zap_idx_to_blk(zap, idx, &blk); if (err != 0) return (err); return (zap_get_leaf_byblk(zap, blk, lp)); } #define CHAIN_END 0xffff /* end of the chunk chain */ #define LEAF_HASH(l, h) \ ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \ ((h) >> \ (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len))) #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)]) static int zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name, uint64_t integer_size, uint64_t num_integers, void *value) { int rc; uint16_t *chunkp; struct zap_leaf_entry *le; /* * Make sure this chunk matches our hash. 
*/ if (zl->l_phys->l_hdr.lh_prefix_len > 0 && zl->l_phys->l_hdr.lh_prefix != hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len)) return (EIO); rc = ENOENT; for (chunkp = LEAF_HASH_ENTPTR(zl, hash); *chunkp != CHAIN_END; chunkp = &le->le_next) { zap_leaf_chunk_t *zc; uint16_t chunk = *chunkp; le = ZAP_LEAF_ENTRY(zl, chunk); if (le->le_hash != hash) continue; zc = &ZAP_LEAF_CHUNK(zl, chunk); if (fzap_name_equal(zl, zc, name)) { if (zc->l_entry.le_value_intlen > integer_size) { rc = EINVAL; } else { fzap_leaf_array(zl, zc, integer_size, num_integers, value); rc = 0; } break; } } return (rc); } /* * Lookup a value in a fatzap directory. */ static int fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, const char *name, uint64_t integer_size, uint64_t num_integers, void *value) { int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; fat_zap_t z; zap_leaf_t *zl; uint64_t hash; int rc; if (zh->zap_magic != ZAP_MAGIC) return (EIO); if ((rc = fzap_check_size(integer_size, num_integers)) != 0) return (rc); z.zap_block_shift = ilog2(bsize); z.zap_phys = zh; z.zap_spa = spa; z.zap_dnode = dnode; hash = zap_hash(zh->zap_salt, name); rc = zap_deref_leaf(&z, hash, &zl); if (rc != 0) return (rc); rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value); zap_leaf_free(zl); return (rc); } /* * Lookup a name in a zap object and return its value as a uint64_t. */ static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name, uint64_t integer_size, uint64_t num_integers, void *value) { int rc; zap_phys_t *zap; size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; zap = malloc(size); if (zap == NULL) return (ENOMEM); rc = dnode_read(spa, dnode, 0, zap, size); if (rc) goto done; switch (zap->zap_block_type) { case ZBT_MICRO: rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value); break; case ZBT_HEADER: rc = fzap_lookup(spa, dnode, zap, name, integer_size, num_integers, value); break; default: printf("ZFS: invalid zap_type=%" PRIx64 "\n", zap->zap_block_type); rc = EIO; } done: free(zap); return (rc); } /* * List a microzap directory. */ static int mzap_list(const mzap_phys_t *mz, size_t size, int (*callback)(const char *, uint64_t)) { const mzap_ent_phys_t *mze; int chunks, i, rc; /* * Microzap objects use exactly one block. Read the whole * thing. */ rc = 0; chunks = size / MZAP_ENT_LEN - 1; for (i = 0; i < chunks; i++) { mze = &mz->mz_chunk[i]; if (mze->mze_name[0]) { rc = callback(mze->mze_name, mze->mze_value); if (rc != 0) break; } } return (rc); } /* * List a fatzap directory. */ static int fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, int (*callback)(const char *, uint64_t)) { int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; fat_zap_t z; uint64_t i; int j, rc; if (zh->zap_magic != ZAP_MAGIC) return (EIO); z.zap_block_shift = ilog2(bsize); z.zap_phys = zh; /* * This assumes that the leaf blocks start at block 1. The * documentation isn't exactly clear on this. 
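zap_deref_leaf() above slices the top zt_shift bits off the 64-bit name hash to choose a pointer-table slot, and zap_leaf_lookup() then verifies that the leaf it landed on really owns that hash prefix. A tiny standalone sketch of the bit slicing, with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint64_t
sk_hash_idx(uint64_t hash, int shift)
{
	/* Mirrors ZAP_HASH_IDX() above. */
	return (shift == 0 ? 0 : hash >> (64 - shift));
}

int
main(void)
{
	uint64_t hash = 0xdeadbeefcafef00dULL;	/* pretend zap_hash() result */
	int shift = 10;				/* zt_shift: 1024 ptrtbl slots */
	uint64_t idx = sk_hash_idx(hash, shift);

	printf("pointer-table slot %ju of %d\n", (uintmax_t)idx, 1 << shift);
	/* A leaf with lh_prefix_len == shift must carry lh_prefix == idx. */
	return (0);
}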
*/ zap_leaf_t zl; zl.l_bs = z.zap_block_shift; zl.l_phys = malloc(bsize); if (zl.l_phys == NULL) return (ENOMEM); for (i = 0; i < zh->zap_num_leafs; i++) { off_t off = ((off_t)(i + 1)) << zl.l_bs; char name[256], *p; uint64_t value; if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) { free(zl.l_phys); return (EIO); } for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { zap_leaf_chunk_t *zc, *nc; int namelen; zc = &ZAP_LEAF_CHUNK(&zl, j); if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) continue; namelen = zc->l_entry.le_name_numints; if (namelen > sizeof(name)) namelen = sizeof(name); /* * Paste the name back together. */ nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk); p = name; while (namelen > 0) { int len; len = namelen; if (len > ZAP_LEAF_ARRAY_BYTES) len = ZAP_LEAF_ARRAY_BYTES; memcpy(p, nc->l_array.la_array, len); p += len; namelen -= len; nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next); } /* * Assume the first eight bytes of the value are * a uint64_t. */ value = fzap_leaf_value(&zl, zc); /* printf("%s 0x%jx\n", name, (uintmax_t)value); */ rc = callback((const char *)name, value); if (rc != 0) { free(zl.l_phys); return (rc); } } } free(zl.l_phys); return (0); } static int zfs_printf(const char *name, uint64_t value __unused) { printf("%s\n", name); return (0); } /* * List a zap directory. */ static int zap_list(const spa_t *spa, const dnode_phys_t *dnode) { zap_phys_t *zap; size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; int rc; zap = malloc(size); if (zap == NULL) return (ENOMEM); rc = dnode_read(spa, dnode, 0, zap, size); if (rc == 0) { if (zap->zap_block_type == ZBT_MICRO) rc = mzap_list((const mzap_phys_t *)zap, size, zfs_printf); else rc = fzap_list(spa, dnode, zap, zfs_printf); } free(zap); return (rc); } static int objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum, dnode_phys_t *dnode) { off_t offset; offset = objnum * sizeof(dnode_phys_t); return dnode_read(spa, &os->os_meta_dnode, offset, dnode, sizeof(dnode_phys_t)); } /* * Lookup a name in a microzap directory. */ static int mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value) { const mzap_ent_phys_t *mze; int chunks, i; /* * Microzap objects use exactly one block. Read the whole * thing. */ chunks = size / MZAP_ENT_LEN - 1; for (i = 0; i < chunks; i++) { mze = &mz->mz_chunk[i]; if (value == mze->mze_value) { strcpy(name, mze->mze_name); return (0); } } return (ENOENT); } static void fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name) { size_t namelen; const zap_leaf_chunk_t *nc; char *p; namelen = zc->l_entry.le_name_numints; nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk); p = name; while (namelen > 0) { size_t len; len = namelen; if (len > ZAP_LEAF_ARRAY_BYTES) len = ZAP_LEAF_ARRAY_BYTES; memcpy(p, nc->l_array.la_array, len); p += len; namelen -= len; nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next); } *p = '\0'; } static int fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh, char *name, uint64_t value) { int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; fat_zap_t z; uint64_t i; int j, rc; if (zh->zap_magic != ZAP_MAGIC) return (EIO); z.zap_block_shift = ilog2(bsize); z.zap_phys = zh; /* * This assumes that the leaf blocks start at block 1. The * documentation isn't exactly clear on this. 
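fzap_list() and fzap_name_copy() above reassemble entry names from a chain of fixed-size leaf array chunks linked through la_next. A toy standalone model of that reassembly, with simplified structures; the real chunks carry exactly ZAP_LEAF_ARRAY_BYTES of name data and are not NUL terminated.

#include <stdio.h>
#include <string.h>

#define	SK_ARRAY_BYTES	21	/* ZAP_LEAF_ARRAY_BYTES in the real layout */

struct sk_chunk {
	char	la_array[SK_ARRAY_BYTES + 1];	/* +1 only for the literals below */
	int	la_next;			/* next chunk index, -1 at the end */
};

static void
sk_name_copy(const struct sk_chunk *chunks, int first, size_t namelen,
    char *out)
{
	const struct sk_chunk *nc = &chunks[first];
	char *p = out;

	while (namelen > 0) {
		size_t len = namelen > SK_ARRAY_BYTES ? SK_ARRAY_BYTES : namelen;

		memcpy(p, nc->la_array, len);
		p += len;
		namelen -= len;
		if (namelen > 0)
			nc = &chunks[nc->la_next];
	}
	*p = '\0';
}

int
main(void)
{
	struct sk_chunk chunks[2] = {
		{ "org.illumos:lz4_compr", 1 },	/* first 21 bytes of the name */
		{ "ess", -1 }			/* remaining 3 bytes */
	};
	char name[64];

	sk_name_copy(chunks, 0, strlen("org.illumos:lz4_compress"), name);
	printf("%s\n", name);
	return (0);
}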
*/ zap_leaf_t zl; zl.l_bs = z.zap_block_shift; zl.l_phys = malloc(bsize); if (zl.l_phys == NULL) return (ENOMEM); for (i = 0; i < zh->zap_num_leafs; i++) { off_t off = ((off_t)(i + 1)) << zl.l_bs; rc = dnode_read(spa, dnode, off, zl.l_phys, bsize); if (rc != 0) goto done; for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) { zap_leaf_chunk_t *zc; zc = &ZAP_LEAF_CHUNK(&zl, j); if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY) continue; if (zc->l_entry.le_value_intlen != 8 || zc->l_entry.le_value_numints != 1) continue; if (fzap_leaf_value(&zl, zc) == value) { fzap_name_copy(&zl, zc, name); goto done; } } } rc = ENOENT; done: free(zl.l_phys); return (rc); } static int zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name, uint64_t value) { zap_phys_t *zap; size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT; int rc; zap = malloc(size); if (zap == NULL) return (ENOMEM); rc = dnode_read(spa, dnode, 0, zap, size); if (rc == 0) { if (zap->zap_block_type == ZBT_MICRO) rc = mzap_rlookup((const mzap_phys_t *)zap, size, name, value); else rc = fzap_rlookup(spa, dnode, zap, name, value); } free(zap); return (rc); } static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result) { char name[256]; char component[256]; uint64_t dir_obj, parent_obj, child_dir_zapobj; dnode_phys_t child_dir_zap, dataset, dir, parent; dsl_dir_phys_t *dd; dsl_dataset_phys_t *ds; char *p; int len; p = &name[sizeof(name) - 1]; *p = '\0'; if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); return (EIO); } ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; dir_obj = ds->ds_dir_obj; for (;;) { if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0) return (EIO); dd = (dsl_dir_phys_t *)&dir.dn_bonus; /* Actual loop condition. */ parent_obj = dd->dd_parent_obj; if (parent_obj == 0) break; if (objset_get_dnode(spa, &spa->spa_mos, parent_obj, &parent) != 0) return (EIO); dd = (dsl_dir_phys_t *)&parent.dn_bonus; child_dir_zapobj = dd->dd_child_dir_zapobj; if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0) return (EIO); if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0) return (EIO); len = strlen(component); p -= len; memcpy(p, component, len); --p; *p = '/'; /* Actual loop iteration. */ dir_obj = parent_obj; } if (*p != '\0') ++p; strcpy(result, p); return (0); } static int zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum) { char element[256]; uint64_t dir_obj, child_dir_zapobj; dnode_phys_t child_dir_zap, dir; dsl_dir_phys_t *dd; const char *p, *q; if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir)) return (EIO); if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj), 1, &dir_obj)) return (EIO); p = name; for (;;) { if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) return (EIO); dd = (dsl_dir_phys_t *)&dir.dn_bonus; while (*p == '/') p++; /* Actual loop condition #1. */ if (*p == '\0') break; q = strchr(p, '/'); if (q) { memcpy(element, p, q - p); element[q - p] = '\0'; p = q + 1; } else { strcpy(element, p); p += strlen(p); } child_dir_zapobj = dd->dd_child_dir_zapobj; if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0) return (EIO); /* Actual loop condition #2. 
*/ if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj), 1, &dir_obj) != 0) return (ENOENT); } *objnum = dd->dd_head_dataset_obj; return (0); } #ifndef BOOT2 static int zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/) { uint64_t dir_obj, child_dir_zapobj; dnode_phys_t child_dir_zap, dir, dataset; dsl_dataset_phys_t *ds; dsl_dir_phys_t *dd; if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); return (EIO); } ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; dir_obj = ds->ds_dir_obj; if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) { printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); return (EIO); } dd = (dsl_dir_phys_t *)&dir.dn_bonus; child_dir_zapobj = dd->dd_child_dir_zapobj; if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap) != 0) { printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); return (EIO); } return (zap_list(spa, &child_dir_zap) != 0); } int zfs_callback_dataset(const spa_t *spa, uint64_t objnum, int (*callback)(const char *, uint64_t)) { uint64_t dir_obj, child_dir_zapobj; dnode_phys_t child_dir_zap, dir, dataset; dsl_dataset_phys_t *ds; dsl_dir_phys_t *dd; zap_phys_t *zap; size_t size; int err; err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset); if (err != 0) { printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); return (err); } ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; dir_obj = ds->ds_dir_obj; err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir); if (err != 0) { printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj); return (err); } dd = (dsl_dir_phys_t *)&dir.dn_bonus; child_dir_zapobj = dd->dd_child_dir_zapobj; err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj, &child_dir_zap); if (err != 0) { printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj); return (err); } size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT; zap = malloc(size); if (zap != NULL) { err = dnode_read(spa, &child_dir_zap, 0, zap, size); if (err != 0) goto done; if (zap->zap_block_type == ZBT_MICRO) err = mzap_list((const mzap_phys_t *)zap, size, callback); else err = fzap_list(spa, &child_dir_zap, zap, callback); } else { err = ENOMEM; } done: free(zap); return (err); } #endif /* * Find the object set given the object number of its dataset object * and return its details in *objset */ static int zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset) { dnode_phys_t dataset; dsl_dataset_phys_t *ds; if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) { printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum); return (EIO); } ds = (dsl_dataset_phys_t *)&dataset.dn_bonus; if (zio_read(spa, &ds->ds_bp, objset)) { printf("ZFS: can't read object set for dataset %ju\n", (uintmax_t)objnum); return (EIO); } return (0); } /* * Find the object set pointed to by the BOOTFS property or the root * dataset if there is none and return its details in *objset */ static int zfs_get_root(const spa_t *spa, uint64_t *objid) { dnode_phys_t dir, propdir; uint64_t props, bootfs, root; *objid = 0; /* * Start with the MOS directory object. */ if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir)) { printf("ZFS: can't read MOS object directory\n"); return (EIO); } /* * Lookup the pool_props and see if we can find a bootfs. 
*/ if (zap_lookup(spa, &dir, DMU_POOL_PROPS, sizeof(props), 1, &props) == 0 && objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0 && zap_lookup(spa, &propdir, "bootfs", sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) { *objid = bootfs; return (0); } /* * Lookup the root dataset directory */ if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof(root), 1, &root) || objset_get_dnode(spa, &spa->spa_mos, root, &dir)) { printf("ZFS: can't find root dsl_dir\n"); return (EIO); } /* * Use the information from the dataset directory's bonus buffer * to find the dataset object and from that the object set itself. */ dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus; *objid = dd->dd_head_dataset_obj; return (0); } static int zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount) { mount->spa = spa; /* * Find the root object set if not explicitly provided */ if (rootobj == 0 && zfs_get_root(spa, &rootobj)) { printf("ZFS: can't find root filesystem\n"); return (EIO); } if (zfs_mount_dataset(spa, rootobj, &mount->objset)) { printf("ZFS: can't open root filesystem\n"); return (EIO); } mount->rootobj = rootobj; return (0); } /* * callback function for feature name checks. */ static int check_feature(const char *name, uint64_t value) { int i; if (value == 0) return (0); if (name[0] == '\0') return (0); for (i = 0; features_for_read[i] != NULL; i++) { if (strcmp(name, features_for_read[i]) == 0) return (0); } printf("ZFS: unsupported feature: %s\n", name); return (EIO); } /* * Checks whether the MOS features that are active are supported. */ static int check_mos_features(const spa_t *spa) { dnode_phys_t dir; zap_phys_t *zap; uint64_t objnum; size_t size; int rc; if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY, &dir)) != 0) return (rc); if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ, sizeof (objnum), 1, &objnum)) != 0) { /* * It is older pool without features. As we have already * tested the label, just return without raising the error. 
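check_feature() above is called for every entry in the pool's features-for-read ZAP; an active feature whose name is not in the loader's NULL-terminated features_for_read[] table makes the pool unreadable for the boot code. A small standalone sketch of that screening, with a hypothetical sk_ table holding a few example feature names:

#include <stdio.h>
#include <string.h>

static const char *sk_features_for_read[] = {
	"org.illumos:lz4_compress",
	"com.delphix:hole_birth",
	"com.delphix:embedded_data",
	NULL
};

static int
sk_check_feature(const char *name, unsigned long long refcount)
{
	if (refcount == 0 || name[0] == '\0')
		return (0);		/* inactive or empty entry */
	for (int i = 0; sk_features_for_read[i] != NULL; i++)
		if (strcmp(name, sk_features_for_read[i]) == 0)
			return (0);	/* feature is known to the loader */
	printf("unsupported feature: %s\n", name);
	return (5);			/* EIO, as in the real callback */
}

int
main(void)
{
	return (sk_check_feature("com.example:future_feature", 1));
}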
*/ return (0); } if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0) return (rc); if (dir.dn_type != DMU_OTN_ZAP_METADATA) return (EIO); size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT; zap = malloc(size); if (zap == NULL) return (ENOMEM); if (dnode_read(spa, &dir, 0, zap, size)) { free(zap); return (EIO); } if (zap->zap_block_type == ZBT_MICRO) rc = mzap_list((const mzap_phys_t *)zap, size, check_feature); else rc = fzap_list(spa, &dir, zap, check_feature); free(zap); return (rc); } static int -load_nvlist(spa_t *spa, uint64_t obj, unsigned char **value) +load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) { dnode_phys_t dir; size_t size; int rc; unsigned char *nv; *value = NULL; if ((rc = objset_get_dnode(spa, &spa->spa_mos, obj, &dir)) != 0) return (rc); if (dir.dn_type != DMU_OT_PACKED_NVLIST && dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) { return (EIO); } if (dir.dn_bonuslen != sizeof (uint64_t)) return (EIO); size = *(uint64_t *)DN_BONUS(&dir); nv = malloc(size); if (nv == NULL) return (ENOMEM); rc = dnode_read(spa, &dir, 0, nv, size); if (rc != 0) { free(nv); nv = NULL; return (rc); } - *value = nv; + *value = nvlist_import(nv + 4, nv[0], nv[1]); + free(nv); return (rc); } static int zfs_spa_init(spa_t *spa) { dnode_phys_t dir; uint64_t config_object; - unsigned char *nvlist; + nvlist_t *nvlist; int rc; if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) { printf("ZFS: can't read MOS of pool %s\n", spa->spa_name); return (EIO); } if (spa->spa_mos.os_type != DMU_OST_META) { printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name); return (EIO); } if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT, &dir)) { printf("ZFS: failed to read pool %s directory object\n", spa->spa_name); return (EIO); } /* this is allowed to fail, older pools do not have salt */ rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1, sizeof (spa->spa_cksum_salt.zcs_bytes), spa->spa_cksum_salt.zcs_bytes); rc = check_mos_features(spa); if (rc != 0) { printf("ZFS: pool %s is not supported\n", spa->spa_name); return (rc); } rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG, sizeof (config_object), 1, &config_object); if (rc != 0) { printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG); return (EIO); } rc = load_nvlist(spa, config_object, &nvlist); if (rc != 0) return (rc); - /* * Update vdevs from MOS config. Note, we do skip encoding bytes * here. See also vdev_label_read_config(). 
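load_nvlist() above reads the packed DMU_OT_PACKED_NVLIST object into a byte buffer and hands nvlist_import() the data starting four bytes in, passing bytes 0 and 1 separately. The sketch below spells out the framing that call shape implies; treat it as an assumption, since the authoritative layout lives in the loader's nvlist code rather than in this hunk.

#include <stdint.h>

/*
 * Assumed 4-byte header in front of a packed nvlist stream, matching
 * nvlist_import(nv + 4, nv[0], nv[1]) as used above: byte 0 names the
 * encoding, byte 1 records the encoder's endianness, bytes 2-3 are unused.
 */
struct sk_packed_nvlist_header {
	uint8_t	encoding;	/* e.g. XDR vs. native encoding */
	uint8_t	endian;		/* endianness the stream was packed with */
	uint8_t	reserved[2];
	/* the encoded name/value pairs follow these four bytes */
};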
*/ - rc = vdev_init_from_nvlist(spa, nvlist + 4); - free(nvlist); + rc = vdev_init_from_nvlist(spa, nvlist); + nvlist_destroy(nvlist); return (rc); } static int zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb) { if (dn->dn_bonustype != DMU_OT_SA) { znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus; sb->st_mode = zp->zp_mode; sb->st_uid = zp->zp_uid; sb->st_gid = zp->zp_gid; sb->st_size = zp->zp_size; } else { sa_hdr_phys_t *sahdrp; int hdrsize; size_t size = 0; void *buf = NULL; if (dn->dn_bonuslen != 0) sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); else { if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) { blkptr_t *bp = DN_SPILL_BLKPTR(dn); int error; size = BP_GET_LSIZE(bp); buf = malloc(size); if (buf == NULL) error = ENOMEM; else error = zio_read(spa, bp, buf); if (error != 0) { free(buf); return (error); } sahdrp = buf; } else { return (EIO); } } hdrsize = SA_HDR_SIZE(sahdrp); sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize + SA_MODE_OFFSET); sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize + SA_UID_OFFSET); sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize + SA_GID_OFFSET); sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize + SA_SIZE_OFFSET); free(buf); } return (0); } static int zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize) { int rc = 0; if (dn->dn_bonustype == DMU_OT_SA) { sa_hdr_phys_t *sahdrp = NULL; size_t size = 0; void *buf = NULL; int hdrsize; char *p; if (dn->dn_bonuslen != 0) { sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn); } else { blkptr_t *bp; if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0) return (EIO); bp = DN_SPILL_BLKPTR(dn); size = BP_GET_LSIZE(bp); buf = malloc(size); if (buf == NULL) rc = ENOMEM; else rc = zio_read(spa, bp, buf); if (rc != 0) { free(buf); return (rc); } sahdrp = buf; } hdrsize = SA_HDR_SIZE(sahdrp); p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET); memcpy(path, p, psize); free(buf); return (0); } /* * Second test is purely to silence bogus compiler * warning about accessing past the end of dn_bonus. */ if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen && sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) { memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize); } else { rc = dnode_read(spa, dn, 0, path, psize); } return (rc); } struct obj_list { uint64_t objnum; STAILQ_ENTRY(obj_list) entry; }; /* * Lookup a file and return its dnode. */ static int zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode) { int rc; uint64_t objnum; const spa_t *spa; dnode_phys_t dn; const char *p, *q; char element[256]; char path[1024]; int symlinks_followed = 0; struct stat sb; struct obj_list *entry, *tentry; STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache); spa = mount->spa; if (mount->objset.os_type != DMU_OST_ZFS) { printf("ZFS: unexpected object set type %ju\n", (uintmax_t)mount->objset.os_type); return (EIO); } if ((entry = malloc(sizeof(struct obj_list))) == NULL) return (ENOMEM); /* * Get the root directory dnode. 
*/ rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn); if (rc) { free(entry); return (rc); } rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum); if (rc) { free(entry); return (rc); } entry->objnum = objnum; STAILQ_INSERT_HEAD(&on_cache, entry, entry); rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); if (rc != 0) goto done; p = upath; while (p && *p) { rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); if (rc != 0) goto done; while (*p == '/') p++; if (*p == '\0') break; q = p; while (*q != '\0' && *q != '/') q++; /* skip dot */ if (p + 1 == q && p[0] == '.') { p++; continue; } /* double dot */ if (p + 2 == q && p[0] == '.' && p[1] == '.') { p += 2; if (STAILQ_FIRST(&on_cache) == STAILQ_LAST(&on_cache, obj_list, entry)) { rc = ENOENT; goto done; } entry = STAILQ_FIRST(&on_cache); STAILQ_REMOVE_HEAD(&on_cache, entry); free(entry); objnum = (STAILQ_FIRST(&on_cache))->objnum; continue; } if (q - p + 1 > sizeof(element)) { rc = ENAMETOOLONG; goto done; } memcpy(element, p, q - p); element[q - p] = 0; p = q; if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0) goto done; if (!S_ISDIR(sb.st_mode)) { rc = ENOTDIR; goto done; } rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum); if (rc) goto done; objnum = ZFS_DIRENT_OBJ(objnum); if ((entry = malloc(sizeof(struct obj_list))) == NULL) { rc = ENOMEM; goto done; } entry->objnum = objnum; STAILQ_INSERT_HEAD(&on_cache, entry, entry); rc = objset_get_dnode(spa, &mount->objset, objnum, &dn); if (rc) goto done; /* * Check for symlink. */ rc = zfs_dnode_stat(spa, &dn, &sb); if (rc) goto done; if (S_ISLNK(sb.st_mode)) { if (symlinks_followed > 10) { rc = EMLINK; goto done; } symlinks_followed++; /* * Read the link value and copy the tail of our * current path onto the end. */ if (sb.st_size + strlen(p) + 1 > sizeof(path)) { rc = ENAMETOOLONG; goto done; } strcpy(&path[sb.st_size], p); rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size); if (rc != 0) goto done; /* * Restart with the new path, starting either at * the root or at the parent depending whether or * not the link is relative. 
*/ p = path; if (*p == '/') { while (STAILQ_FIRST(&on_cache) != STAILQ_LAST(&on_cache, obj_list, entry)) { entry = STAILQ_FIRST(&on_cache); STAILQ_REMOVE_HEAD(&on_cache, entry); free(entry); } } else { entry = STAILQ_FIRST(&on_cache); STAILQ_REMOVE_HEAD(&on_cache, entry); free(entry); } objnum = (STAILQ_FIRST(&on_cache))->objnum; } } *dnode = dn; done: STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry) free(entry); return (rc); } Index: head/stand/loader.mk =================================================================== --- head/stand/loader.mk (revision 362430) +++ head/stand/loader.mk (revision 362431) @@ -1,177 +1,178 @@ # $FreeBSD$ .PATH: ${LDRSRC} ${BOOTSRC}/libsa CFLAGS+=-I${LDRSRC} SRCS+= boot.c commands.c console.c devopen.c interp.c SRCS+= interp_backslash.c interp_parse.c ls.c misc.c SRCS+= module.c .if ${MACHINE} == "i386" || ${MACHINE_CPUARCH} == "amd64" SRCS+= load_elf32.c load_elf32_obj.c reloc_elf32.c SRCS+= load_elf64.c load_elf64_obj.c reloc_elf64.c .elif ${MACHINE_CPUARCH} == "aarch64" SRCS+= load_elf64.c reloc_elf64.c .elif ${MACHINE_CPUARCH} == "arm" SRCS+= load_elf32.c reloc_elf32.c .elif ${MACHINE_CPUARCH} == "powerpc" SRCS+= load_elf32.c reloc_elf32.c SRCS+= load_elf64.c reloc_elf64.c SRCS+= metadata.c .elif ${MACHINE_ARCH:Mmips64*} != "" SRCS+= load_elf64.c reloc_elf64.c SRCS+= metadata.c .elif ${MACHINE} == "mips" SRCS+= load_elf32.c reloc_elf32.c SRCS+= metadata.c .endif .if ${LOADER_DISK_SUPPORT:Uyes} == "yes" CFLAGS.part.c+= -DHAVE_MEMCPY -I${SRCTOP}/sys/contrib/zlib SRCS+= disk.c part.c vdisk.c .endif .if ${LOADER_NET_SUPPORT:Uno} == "yes" SRCS+= dev_net.c .endif .if defined(HAVE_BCACHE) SRCS+= bcache.c .endif .if defined(MD_IMAGE_SIZE) CFLAGS+= -DMD_IMAGE_SIZE=${MD_IMAGE_SIZE} SRCS+= md.c .else CLEANFILES+= md.o .endif # Machine-independant ISA PnP .if defined(HAVE_ISABUS) SRCS+= isapnp.c .endif .if defined(HAVE_PNP) SRCS+= pnp.c .endif .if ${LOADER_INTERP} == "lua" SRCS+= interp_lua.c .include "${BOOTSRC}/lua.mk" LDR_INTERP= ${LIBLUA} LDR_INTERP32= ${LIBLUA32} CFLAGS.interp_lua.c= -DLUA_PATH=\"${LUAPATH}\" -I${FLUASRC}/modules .elif ${LOADER_INTERP} == "4th" SRCS+= interp_forth.c .include "${BOOTSRC}/ficl.mk" LDR_INTERP= ${LIBFICL} LDR_INTERP32= ${LIBFICL32} .elif ${LOADER_INTERP} == "simp" SRCS+= interp_simple.c .else .error Unknown interpreter ${LOADER_INTERP} .endif .if ${MK_LOADER_VERIEXEC} != "no" CFLAGS+= -DLOADER_VERIEXEC -I${SRCTOP}/lib/libsecureboot/h .if ${MK_LOADER_VERIEXEC_VECTX} != "no" CFLAGS+= -DLOADER_VERIEXEC_VECTX .endif .endif .if ${MK_LOADER_VERIEXEC_PASS_MANIFEST} != "no" CFLAGS+= -DLOADER_VERIEXEC_PASS_MANIFEST -I${SRCTOP}/lib/libsecureboot/h .endif .if defined(BOOT_PROMPT_123) CFLAGS+= -DBOOT_PROMPT_123 .endif .if defined(LOADER_INSTALL_SUPPORT) SRCS+= install.c .endif # Filesystem support .if ${LOADER_CD9660_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_CD9660_SUPPORT .endif .if ${LOADER_EXT2FS_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_EXT2FS_SUPPORT .endif .if ${LOADER_MSDOS_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_MSDOS_SUPPORT .endif .if ${LOADER_UFS_SUPPORT:Uyes} == "yes" CFLAGS+= -DLOADER_UFS_SUPPORT .endif # Compression .if ${LOADER_GZIP_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_GZIP_SUPPORT .endif .if ${LOADER_BZIP2_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_BZIP2_SUPPORT .endif # Network related things .if ${LOADER_NET_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_NET_SUPPORT .endif .if ${LOADER_NFS_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_NFS_SUPPORT .endif .if ${LOADER_TFTP_SUPPORT:Uno} == "yes" CFLAGS+= -DLOADER_TFTP_SUPPORT .endif # 
Partition support .if ${LOADER_GPT_SUPPORT:Uyes} == "yes" CFLAGS+= -DLOADER_GPT_SUPPORT .endif .if ${LOADER_MBR_SUPPORT:Uyes} == "yes" CFLAGS+= -DLOADER_MBR_SUPPORT .endif .if ${HAVE_ZFS:Uno} == "yes" CFLAGS+= -DLOADER_ZFS_SUPPORT CFLAGS+= -I${ZFSSRC} CFLAGS+= -I${SYSDIR}/cddl/boot/zfs +CFLAGS+= -I${SYSDIR}/cddl/contrib/opensolaris/uts/common SRCS+= zfs_cmd.c .endif LIBFICL= ${BOOTOBJ}/ficl/libficl.a .if ${MACHINE} == "i386" LIBFICL32= ${LIBFICL} .else LIBFICL32= ${BOOTOBJ}/ficl32/libficl.a .endif LIBLUA= ${BOOTOBJ}/liblua/liblua.a .if ${MACHINE} == "i386" LIBLUA32= ${LIBLUA} .else LIBLUA32= ${BOOTOBJ}/liblua32/liblua.a .endif CLEANFILES+= vers.c VERSION_FILE?= ${.CURDIR}/version .if ${MK_REPRODUCIBLE_BUILD} != no REPRO_FLAG= -r .endif vers.c: ${LDRSRC}/newvers.sh ${VERSION_FILE} sh ${LDRSRC}/newvers.sh ${REPRO_FLAG} ${VERSION_FILE} \ ${NEWVERSWHAT} .if ${MK_LOADER_VERBOSE} != "no" CFLAGS+= -DELF_VERBOSE .endif .if !empty(HELP_FILES) HELP_FILES+= ${LDRSRC}/help.common CLEANFILES+= loader.help FILES+= loader.help loader.help: ${HELP_FILES} cat ${HELP_FILES} | awk -f ${LDRSRC}/merge_help.awk > ${.TARGET} .endif Index: head/stand/userboot/userboot/Makefile =================================================================== --- head/stand/userboot/userboot/Makefile (revision 362430) +++ head/stand/userboot/userboot/Makefile (revision 362431) @@ -1,59 +1,60 @@ # $FreeBSD$ LOADER_MSDOS_SUPPORT?= yes LOADER_UFS_SUPPORT?= yes LOADER_CD9660_SUPPORT?= no LOADER_EXT2FS_SUPPORT?= no PIC=yes .include SHLIB_NAME= userboot_${LOADER_INTERP}.so STRIP= LIBDIR= /boot .PATH: ${.CURDIR}/../userboot SRCS= autoload.c SRCS+= bcache.c SRCS+= biossmap.c SRCS+= bootinfo.c SRCS+= bootinfo32.c SRCS+= bootinfo64.c SRCS+= conf.c SRCS+= console.c SRCS+= copy.c SRCS+= devicename.c SRCS+= elf32_freebsd.c SRCS+= elf64_freebsd.c SRCS+= host.c SRCS+= main.c SRCS+= userboot_cons.c SRCS+= userboot_disk.c SRCS+= vers.c CFLAGS+= -Wall CFLAGS+= -I${BOOTSRC}/userboot +CFLAGS+= -I${SYSDIR}/cddl/contrib/opensolaris/uts/common CWARNFLAGS.main.c += -Wno-implicit-function-declaration LDFLAGS+= -nostdlib -Wl,-Bsymbolic NEWVERSWHAT= "User boot ${LOADER_INTERP}" ${MACHINE_CPUARCH} VERSION_FILE= ${.CURDIR}/../userboot/version .if ${LOADER_INTERP} == ${LOADER_DEFAULT_INTERP} LINKS+= ${BINDIR}/${SHLIB_NAME} ${BINDIR}/userboot.so .endif .if ${MK_LOADER_ZFS} != "no" CFLAGS+= -DUSERBOOT_ZFS_SUPPORT HAVE_ZFS=yes .endif # Always add MI sources .include "${BOOTSRC}/loader.mk" CFLAGS+= -I. DPADD+= ${LDR_INTERP} ${LIBSA} LDADD+= ${LDR_INTERP} ${LIBSA} .include Index: head/sys/cddl/boot/zfs/zfsimpl.h =================================================================== --- head/sys/cddl/boot/zfs/zfsimpl.h (revision 362430) +++ head/sys/cddl/boot/zfs/zfsimpl.h (revision 362431) @@ -1,1813 +1,1842 @@ /*- * Copyright (c) 2002 McAfee, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Marshall * Kirk McKusick and McAfee Research,, the Security Research Division of * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as * part of the DARPA CHATS research program * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2009 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ /* * Copyright 2013 by Saso Kiselkov. All rights reserved. */ /* - * Copyright (c) 2013 by Delphix. All rights reserved. + * Copyright (c) 2020 by Delphix. All rights reserved. */ +#include +#include +#include + +#ifndef _ZFSIMPL_H_ +#define _ZFSIMPL_H_ + #define MAXNAMELEN 256 #define _NOTE(s) /* * AVL comparator helpers */ #define AVL_ISIGN(a) (((a) > 0) - ((a) < 0)) #define AVL_CMP(a, b) (((a) > (b)) - ((a) < (b))) #define AVL_PCMP(a, b) \ (((uintptr_t)(a) > (uintptr_t)(b)) - ((uintptr_t)(a) < (uintptr_t)(b))) typedef enum { B_FALSE, B_TRUE } boolean_t; /* CRC64 table */ #define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */ /* * Macros for various sorts of alignment and rounding when the alignment * is known to be a power of 2. */ #define P2ALIGN(x, align) ((x) & -(align)) #define P2PHASE(x, align) ((x) & ((align) - 1)) #define P2NPHASE(x, align) (-(x) & ((align) - 1)) #define P2ROUNDUP(x, align) (-(-(x) & -(align))) #define P2END(x, align) (-(~(x) & -(align))) #define P2PHASEUP(x, align, phase) ((phase) - (((phase) - (x)) & -(align))) #define P2BOUNDARY(off, len, align) (((off) ^ ((off) + (len) - 1)) > (align) - 1) /* * General-purpose 32-bit and 64-bit bitfield encodings. 
*/ #define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len)) #define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len)) #define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low)) #define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low)) #define BF32_GET(x, low, len) BF32_DECODE(x, low, len) #define BF64_GET(x, low, len) BF64_DECODE(x, low, len) #define BF32_SET(x, low, len, val) \ ((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len)) #define BF64_SET(x, low, len, val) \ ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len)) #define BF32_GET_SB(x, low, len, shift, bias) \ ((BF32_GET(x, low, len) + (bias)) << (shift)) #define BF64_GET_SB(x, low, len, shift, bias) \ ((BF64_GET(x, low, len) + (bias)) << (shift)) #define BF32_SET_SB(x, low, len, shift, bias, val) \ BF32_SET(x, low, len, ((val) >> (shift)) - (bias)) #define BF64_SET_SB(x, low, len, shift, bias, val) \ BF64_SET(x, low, len, ((val) >> (shift)) - (bias)) /* * Macros to reverse byte order */ #define BSWAP_8(x) ((x) & 0xff) #define BSWAP_16(x) ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8)) #define BSWAP_32(x) ((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16)) #define BSWAP_64(x) ((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32)) #define SPA_MINBLOCKSHIFT 9 #define SPA_OLDMAXBLOCKSHIFT 17 #define SPA_MAXBLOCKSHIFT 24 #define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT) #define SPA_OLDMAXBLOCKSIZE (1ULL << SPA_OLDMAXBLOCKSHIFT) #define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT) /* * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB. * The ASIZE encoding should be at least 64 times larger (6 more bits) * to support up to 4-way RAID-Z mirror mode with worst-case gang block * overhead, three DVAs per bp, plus one more bit in case we do anything * else that expands the ASIZE. */ #define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */ #define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */ #define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */ /* * All SPA data is represented by 128-bit data virtual addresses (DVAs). * The members of the dva_t should be considered opaque outside the SPA. */ typedef struct dva { uint64_t dva_word[2]; } dva_t; /* * Each block has a 256-bit checksum -- strong enough for cryptographic hashes. */ typedef struct zio_cksum { uint64_t zc_word[4]; } zio_cksum_t; /* * Some checksums/hashes need a 256-bit initialization salt. This salt is kept * secret and is suitable for use in MAC algorithms as the key. */ typedef struct zio_cksum_salt { uint8_t zcs_bytes[32]; } zio_cksum_salt_t; /* * Each block is described by its DVAs, time of birth, checksum, etc. 
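The BF64_* macros above are the generic bitfield accessors that the blkptr and DVA macros later in this header are built from; BP_GET_COMPRESS(), for example, reads a 7-bit field at bit 32 of blk_prop. A short standalone sketch with functionally equivalent helpers and that same example field:

#include <stdint.h>
#include <stdio.h>

static uint64_t
sk_bf64_get(uint64_t x, int low, int len)
{
	return ((x >> low) & ((1ULL << len) - 1));
}

static uint64_t
sk_bf64_set(uint64_t x, int low, int len, uint64_t val)
{
	uint64_t mask = ((1ULL << len) - 1) << low;

	return ((x & ~mask) | ((val << low) & mask));
}

int
main(void)
{
	uint64_t blk_prop = 0;

	blk_prop = sk_bf64_set(blk_prop, 32, 7, 15);	/* compression field */
	printf("compression = %ju\n", (uintmax_t)sk_bf64_get(blk_prop, 32, 7));
	return (0);
}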
* The word-by-word, bit-by-bit layout of the blkptr is as follows: * * 64 56 48 40 32 24 16 8 0 * +-------+-------+-------+-------+-------+-------+-------+-------+ * 0 | vdev1 | GRID | ASIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 1 |G| offset1 | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 2 | vdev2 | GRID | ASIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 3 |G| offset2 | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 4 | vdev3 | GRID | ASIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 5 |G| offset3 | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 7 | padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 8 | padding | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 9 | physical birth txg | * +-------+-------+-------+-------+-------+-------+-------+-------+ * a | logical birth txg | * +-------+-------+-------+-------+-------+-------+-------+-------+ * b | fill count | * +-------+-------+-------+-------+-------+-------+-------+-------+ * c | checksum[0] | * +-------+-------+-------+-------+-------+-------+-------+-------+ * d | checksum[1] | * +-------+-------+-------+-------+-------+-------+-------+-------+ * e | checksum[2] | * +-------+-------+-------+-------+-------+-------+-------+-------+ * f | checksum[3] | * +-------+-------+-------+-------+-------+-------+-------+-------+ * * Legend: * * vdev virtual device ID * offset offset into virtual device * LSIZE logical size * PSIZE physical size (after compression) * ASIZE allocated size (including RAID-Z parity and gang block headers) * GRID RAID-Z layout information (reserved for future use) * cksum checksum function * comp compression function * G gang block indicator * B byteorder (endianness) * D dedup * X encryption (on version 30, which is not supported) * E blkptr_t contains embedded data (see below) * lvl level of indirection * type DMU object type * phys birth txg of block allocation; zero if same as logical birth txg * log. birth transaction group in which the block was logically born * fill count number of non-zero blocks under this bp * checksum[4] 256-bit checksum of the data this bp describes */ /* * "Embedded" blkptr_t's don't actually point to a block, instead they * have a data payload embedded in the blkptr_t itself. See the comment * in blkptr.c for more details. 
* * The blkptr_t is laid out as follows: * * 64 56 48 40 32 24 16 8 0 * +-------+-------+-------+-------+-------+-------+-------+-------+ * 0 | payload | * 1 | payload | * 2 | payload | * 3 | payload | * 4 | payload | * 5 | payload | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE | * +-------+-------+-------+-------+-------+-------+-------+-------+ * 7 | payload | * 8 | payload | * 9 | payload | * +-------+-------+-------+-------+-------+-------+-------+-------+ * a | logical birth txg | * +-------+-------+-------+-------+-------+-------+-------+-------+ * b | payload | * c | payload | * d | payload | * e | payload | * f | payload | * +-------+-------+-------+-------+-------+-------+-------+-------+ * * Legend: * * payload contains the embedded data * B (byteorder) byteorder (endianness) * D (dedup) padding (set to zero) * X encryption (set to zero; see above) * E (embedded) set to one * lvl indirection level * type DMU object type * etype how to interpret embedded data (BP_EMBEDDED_TYPE_*) * comp compression function of payload * PSIZE size of payload after compression, in bytes * LSIZE logical size of payload, in bytes * note that 25 bits is enough to store the largest * "normal" BP's LSIZE (2^16 * 2^9) in bytes * log. birth transaction group in which the block was logically born * * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded * bp's they are stored in units of SPA_MINBLOCKSHIFT. * Generally, the generic BP_GET_*() macros can be used on embedded BP's. * The B, D, X, lvl, type, and comp fields are stored the same as with normal * BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must * be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before * other macros, as they assert that they are only used on BP's of the correct * "embedded-ness". */ #define BPE_GET_ETYPE(bp) \ (ASSERT(BP_IS_EMBEDDED(bp)), \ BF64_GET((bp)->blk_prop, 40, 8)) #define BPE_SET_ETYPE(bp, t) do { \ ASSERT(BP_IS_EMBEDDED(bp)); \ BF64_SET((bp)->blk_prop, 40, 8, t); \ _NOTE(CONSTCOND) } while (0) #define BPE_GET_LSIZE(bp) \ (ASSERT(BP_IS_EMBEDDED(bp)), \ BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1)) #define BPE_SET_LSIZE(bp, x) do { \ ASSERT(BP_IS_EMBEDDED(bp)); \ BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \ _NOTE(CONSTCOND) } while (0) #define BPE_GET_PSIZE(bp) \ (ASSERT(BP_IS_EMBEDDED(bp)), \ BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1)) #define BPE_SET_PSIZE(bp, x) do { \ ASSERT(BP_IS_EMBEDDED(bp)); \ BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \ _NOTE(CONSTCOND) } while (0) typedef enum bp_embedded_type { BP_EMBEDDED_TYPE_DATA, BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */ NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED } bp_embedded_type_t; #define BPE_NUM_WORDS 14 #define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t)) #define BPE_IS_PAYLOADWORD(bp, wp) \ ((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth) #define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */ #define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */ typedef struct blkptr { dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */ uint64_t blk_prop; /* size, compression, type, etc */ uint64_t blk_pad[2]; /* Extra space for the future */ uint64_t blk_phys_birth; /* txg when block was allocated */ uint64_t blk_birth; /* transaction group at birth */ uint64_t blk_fill; /* fill count */ zio_cksum_t blk_cksum; /* 256-bit checksum */ } blkptr_t; /* * Macros to get and set fields in a bp or DVA. 
*/ #define DVA_GET_ASIZE(dva) \ BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0) #define DVA_SET_ASIZE(dva, x) \ BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \ SPA_MINBLOCKSHIFT, 0, x) #define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8) #define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x) #define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, 32) #define DVA_SET_VDEV(dva, x) BF64_SET((dva)->dva_word[0], 32, 32, x) #define DVA_GET_OFFSET(dva) \ BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0) #define DVA_SET_OFFSET(dva, x) \ BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x) #define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1) #define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x) #define BP_GET_LSIZE(bp) \ (BP_IS_EMBEDDED(bp) ? \ (BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \ BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)) #define BP_SET_LSIZE(bp, x) do { \ ASSERT(!BP_IS_EMBEDDED(bp)); \ BF64_SET_SB((bp)->blk_prop, \ 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \ _NOTE(CONSTCOND) } while (0) #define BP_GET_PSIZE(bp) \ BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1) #define BP_SET_PSIZE(bp, x) \ BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x) #define BP_GET_COMPRESS(bp) BF64_GET((bp)->blk_prop, 32, 7) #define BP_SET_COMPRESS(bp, x) BF64_SET((bp)->blk_prop, 32, 7, x) #define BP_GET_CHECKSUM(bp) BF64_GET((bp)->blk_prop, 40, 8) #define BP_SET_CHECKSUM(bp, x) BF64_SET((bp)->blk_prop, 40, 8, x) #define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8) #define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x) #define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5) #define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x) #define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1) #define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1) #define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x) #define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1) #define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x) #define BP_PHYSICAL_BIRTH(bp) \ ((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth) #define BP_GET_ASIZE(bp) \ (DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \ DVA_GET_ASIZE(&(bp)->blk_dva[2])) #define BP_GET_UCSIZE(bp) \ ((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? 
\ BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp)); #define BP_GET_NDVAS(bp) \ (!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \ !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \ !!DVA_GET_ASIZE(&(bp)->blk_dva[2])) #define DVA_EQUAL(dva1, dva2) \ ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \ (dva1)->dva_word[0] == (dva2)->dva_word[0]) #define ZIO_CHECKSUM_EQUAL(zc1, zc2) \ (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \ ((zc1).zc_word[1] - (zc2).zc_word[1]) | \ ((zc1).zc_word[2] - (zc2).zc_word[2]) | \ ((zc1).zc_word[3] - (zc2).zc_word[3]))) #define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0) #define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3) \ { \ (zcp)->zc_word[0] = w0; \ (zcp)->zc_word[1] = w1; \ (zcp)->zc_word[2] = w2; \ (zcp)->zc_word[3] = w3; \ } #define BP_IDENTITY(bp) (&(bp)->blk_dva[0]) #define BP_IS_GANG(bp) DVA_GET_GANG(BP_IDENTITY(bp)) #define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \ (dva)->dva_word[1] == 0ULL) #define BP_IS_HOLE(bp) DVA_IS_EMPTY(BP_IDENTITY(bp)) #define BP_IS_OLDER(bp, txg) (!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg)) #define BP_ZERO(bp) \ { \ (bp)->blk_dva[0].dva_word[0] = 0; \ (bp)->blk_dva[0].dva_word[1] = 0; \ (bp)->blk_dva[1].dva_word[0] = 0; \ (bp)->blk_dva[1].dva_word[1] = 0; \ (bp)->blk_dva[2].dva_word[0] = 0; \ (bp)->blk_dva[2].dva_word[1] = 0; \ (bp)->blk_prop = 0; \ (bp)->blk_pad[0] = 0; \ (bp)->blk_pad[1] = 0; \ (bp)->blk_phys_birth = 0; \ (bp)->blk_birth = 0; \ (bp)->blk_fill = 0; \ ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \ } #if BYTE_ORDER == _BIG_ENDIAN #define ZFS_HOST_BYTEORDER (0ULL) #else #define ZFS_HOST_BYTEORDER (1ULL) #endif #define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER) #define BPE_NUM_WORDS 14 #define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t)) #define BPE_IS_PAYLOADWORD(bp, wp) \ ((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth) /* * Embedded checksum */ #define ZEC_MAGIC 0x210da7ab10c7a11ULL typedef struct zio_eck { uint64_t zec_magic; /* for validation, endianness */ zio_cksum_t zec_cksum; /* 256-bit checksum */ } zio_eck_t; /* * Gang block headers are self-checksumming and contain an array * of block pointers. */ #define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE #define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \ sizeof (zio_eck_t)) / sizeof (blkptr_t)) #define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \ sizeof (zio_eck_t) - \ (SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\ sizeof (uint64_t)) typedef struct zio_gbh { blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS]; uint64_t zg_filler[SPA_GBH_FILLER]; zio_eck_t zg_tail; } zio_gbh_phys_t; #define VDEV_RAIDZ_MAXPARITY 3 #define VDEV_PAD_SIZE (8 << 10) -/* 2 padding areas (vl_pad1 and vl_pad2) to skip */ +/* 2 padding areas (vl_pad1 and vl_be) to skip */ #define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2 #define VDEV_PHYS_SIZE (112 << 10) #define VDEV_UBERBLOCK_RING (128 << 10) /* * MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock * ring when MMP is enabled. */ #define MMP_BLOCKS_PER_LABEL 1 /* The largest uberblock we support is 8k. 
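 * Each label reserves a VDEV_UBERBLOCK_RING (128K) region for uberblocks;
 * VDEV_UBERBLOCK_SHIFT() below clamps the per-slot size between
 * UBERBLOCK_SHIFT (1K) and MAX_UBERBLOCK_SHIFT (8K), so the ring holds
 * between 128 (1K slots) and 16 (8K slots) uberblocks.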
*/ #define MAX_UBERBLOCK_SHIFT (13) #define VDEV_UBERBLOCK_SHIFT(vd) \ MIN(MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT), MAX_UBERBLOCK_SHIFT) #define VDEV_UBERBLOCK_COUNT(vd) \ (VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd)) #define VDEV_UBERBLOCK_OFFSET(vd, n) \ offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)]) #define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd)) typedef struct vdev_phys { char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)]; zio_eck_t vp_zbt; } vdev_phys_t; +typedef enum vbe_vers { + /* The bootenv file is stored as ascii text in the envblock */ + VB_RAW = 0, + + /* + * The bootenv file is converted to an nvlist and then packed into the + * envblock. + */ + VB_NVLIST = 1 +} vbe_vers_t; + +typedef struct vdev_boot_envblock { + uint64_t vbe_version; + char vbe_bootenv[VDEV_PAD_SIZE - sizeof (uint64_t) - + sizeof (zio_eck_t)]; + zio_eck_t vbe_zbt; +} vdev_boot_envblock_t; + +CTASSERT(sizeof (vdev_boot_envblock_t) == VDEV_PAD_SIZE); + typedef struct vdev_label { char vl_pad1[VDEV_PAD_SIZE]; /* 8K */ - char vl_pad2[VDEV_PAD_SIZE]; /* 8K */ + vdev_boot_envblock_t vl_be; /* 8K */ vdev_phys_t vl_vdev_phys; /* 112K */ char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */ } vdev_label_t; /* 256K total */ /* * vdev_dirty() flags */ #define VDD_METASLAB 0x01 #define VDD_DTL 0x02 /* * Size and offset of embedded boot loader region on each label. * The total size of the first two labels plus the boot area is 4MB. */ #define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t)) #define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */ /* * Size of label regions at the start and end of each leaf device. */ #define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE) #define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t)) #define VDEV_LABELS 4 enum zio_checksum { ZIO_CHECKSUM_INHERIT = 0, ZIO_CHECKSUM_ON, ZIO_CHECKSUM_OFF, ZIO_CHECKSUM_LABEL, ZIO_CHECKSUM_GANG_HEADER, ZIO_CHECKSUM_ZILOG, ZIO_CHECKSUM_FLETCHER_2, ZIO_CHECKSUM_FLETCHER_4, ZIO_CHECKSUM_SHA256, ZIO_CHECKSUM_ZILOG2, ZIO_CHECKSUM_NOPARITY, ZIO_CHECKSUM_SHA512, ZIO_CHECKSUM_SKEIN, ZIO_CHECKSUM_EDONR, ZIO_CHECKSUM_FUNCTIONS }; #define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4 #define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON enum zio_compress { ZIO_COMPRESS_INHERIT = 0, ZIO_COMPRESS_ON, ZIO_COMPRESS_OFF, ZIO_COMPRESS_LZJB, ZIO_COMPRESS_EMPTY, ZIO_COMPRESS_GZIP_1, ZIO_COMPRESS_GZIP_2, ZIO_COMPRESS_GZIP_3, ZIO_COMPRESS_GZIP_4, ZIO_COMPRESS_GZIP_5, ZIO_COMPRESS_GZIP_6, ZIO_COMPRESS_GZIP_7, ZIO_COMPRESS_GZIP_8, ZIO_COMPRESS_GZIP_9, ZIO_COMPRESS_ZLE, ZIO_COMPRESS_LZ4, ZIO_COMPRESS_FUNCTIONS }; #define ZIO_COMPRESS_ON_VALUE ZIO_COMPRESS_LZJB #define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF /* nvlist pack encoding */ #define NV_ENCODE_NATIVE 0 #define NV_ENCODE_XDR 1 typedef enum { DATA_TYPE_UNKNOWN = 0, DATA_TYPE_BOOLEAN, DATA_TYPE_BYTE, DATA_TYPE_INT16, DATA_TYPE_UINT16, DATA_TYPE_INT32, DATA_TYPE_UINT32, DATA_TYPE_INT64, DATA_TYPE_UINT64, DATA_TYPE_STRING, DATA_TYPE_BYTE_ARRAY, DATA_TYPE_INT16_ARRAY, DATA_TYPE_UINT16_ARRAY, DATA_TYPE_INT32_ARRAY, DATA_TYPE_UINT32_ARRAY, DATA_TYPE_INT64_ARRAY, DATA_TYPE_UINT64_ARRAY, DATA_TYPE_STRING_ARRAY, DATA_TYPE_HRTIME, DATA_TYPE_NVLIST, DATA_TYPE_NVLIST_ARRAY, DATA_TYPE_BOOLEAN_VALUE, DATA_TYPE_INT8, DATA_TYPE_UINT8, DATA_TYPE_BOOLEAN_ARRAY, DATA_TYPE_INT8_ARRAY, DATA_TYPE_UINT8_ARRAY } data_type_t; /* * On-disk version number. 
*/ #define SPA_VERSION_1 1ULL #define SPA_VERSION_2 2ULL #define SPA_VERSION_3 3ULL #define SPA_VERSION_4 4ULL #define SPA_VERSION_5 5ULL #define SPA_VERSION_6 6ULL #define SPA_VERSION_7 7ULL #define SPA_VERSION_8 8ULL #define SPA_VERSION_9 9ULL #define SPA_VERSION_10 10ULL #define SPA_VERSION_11 11ULL #define SPA_VERSION_12 12ULL #define SPA_VERSION_13 13ULL #define SPA_VERSION_14 14ULL #define SPA_VERSION_15 15ULL #define SPA_VERSION_16 16ULL #define SPA_VERSION_17 17ULL #define SPA_VERSION_18 18ULL #define SPA_VERSION_19 19ULL #define SPA_VERSION_20 20ULL #define SPA_VERSION_21 21ULL #define SPA_VERSION_22 22ULL #define SPA_VERSION_23 23ULL #define SPA_VERSION_24 24ULL #define SPA_VERSION_25 25ULL #define SPA_VERSION_26 26ULL #define SPA_VERSION_27 27ULL #define SPA_VERSION_28 28ULL #define SPA_VERSION_5000 5000ULL /* * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*}, * and do the appropriate changes. Also bump the version number in * usr/src/grub/capability. */ #define SPA_VERSION SPA_VERSION_5000 #define SPA_VERSION_STRING "5000" /* * Symbolic names for the changes that caused a SPA_VERSION switch. * Used in the code when checking for presence or absence of a feature. * Feel free to define multiple symbolic names for each version if there * were multiple changes to on-disk structures during that version. * * NOTE: When checking the current SPA_VERSION in your code, be sure * to use spa_version() since it reports the version of the * last synced uberblock. Checking the in-flight version can * be dangerous in some cases. */ #define SPA_VERSION_INITIAL SPA_VERSION_1 #define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2 #define SPA_VERSION_SPARES SPA_VERSION_3 #define SPA_VERSION_RAID6 SPA_VERSION_3 #define SPA_VERSION_BPLIST_ACCOUNT SPA_VERSION_3 #define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3 #define SPA_VERSION_DNODE_BYTES SPA_VERSION_3 #define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4 #define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5 #define SPA_VERSION_BOOTFS SPA_VERSION_6 #define SPA_VERSION_SLOGS SPA_VERSION_7 #define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8 #define SPA_VERSION_FUID SPA_VERSION_9 #define SPA_VERSION_REFRESERVATION SPA_VERSION_9 #define SPA_VERSION_REFQUOTA SPA_VERSION_9 #define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9 #define SPA_VERSION_L2CACHE SPA_VERSION_10 #define SPA_VERSION_NEXT_CLONES SPA_VERSION_11 #define SPA_VERSION_ORIGIN SPA_VERSION_11 #define SPA_VERSION_DSL_SCRUB SPA_VERSION_11 #define SPA_VERSION_SNAP_PROPS SPA_VERSION_12 #define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13 #define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14 #define SPA_VERSION_USERSPACE SPA_VERSION_15 #define SPA_VERSION_STMF_PROP SPA_VERSION_16 #define SPA_VERSION_RAIDZ3 SPA_VERSION_17 #define SPA_VERSION_USERREFS SPA_VERSION_18 #define SPA_VERSION_HOLES SPA_VERSION_19 #define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20 #define SPA_VERSION_DEDUP SPA_VERSION_21 #define SPA_VERSION_RECVD_PROPS SPA_VERSION_22 #define SPA_VERSION_SLIM_ZIL SPA_VERSION_23 #define SPA_VERSION_SA SPA_VERSION_24 #define SPA_VERSION_SCAN SPA_VERSION_25 #define SPA_VERSION_DIR_CLONES SPA_VERSION_26 #define SPA_VERSION_DEADLISTS SPA_VERSION_26 #define SPA_VERSION_FAST_SNAP SPA_VERSION_27 #define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28 #define SPA_VERSION_BEFORE_FEATURES SPA_VERSION_28 #define SPA_VERSION_FEATURES SPA_VERSION_5000 #define SPA_VERSION_IS_SUPPORTED(v) \ (((v) >= SPA_VERSION_INITIAL && (v) <= 
SPA_VERSION_BEFORE_FEATURES) || \ ((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION)) /* * The following are configuration names used in the nvlist describing a pool's * configuration. */ #define ZPOOL_CONFIG_VERSION "version" #define ZPOOL_CONFIG_POOL_NAME "name" #define ZPOOL_CONFIG_POOL_STATE "state" #define ZPOOL_CONFIG_POOL_TXG "txg" #define ZPOOL_CONFIG_POOL_GUID "pool_guid" #define ZPOOL_CONFIG_CREATE_TXG "create_txg" #define ZPOOL_CONFIG_TOP_GUID "top_guid" #define ZPOOL_CONFIG_VDEV_TREE "vdev_tree" #define ZPOOL_CONFIG_TYPE "type" #define ZPOOL_CONFIG_CHILDREN "children" #define ZPOOL_CONFIG_ID "id" #define ZPOOL_CONFIG_GUID "guid" #define ZPOOL_CONFIG_INDIRECT_OBJECT "com.delphix:indirect_object" #define ZPOOL_CONFIG_INDIRECT_BIRTHS "com.delphix:indirect_births" #define ZPOOL_CONFIG_PREV_INDIRECT_VDEV "com.delphix:prev_indirect_vdev" #define ZPOOL_CONFIG_PATH "path" #define ZPOOL_CONFIG_DEVID "devid" #define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array" #define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift" #define ZPOOL_CONFIG_ASHIFT "ashift" #define ZPOOL_CONFIG_ASIZE "asize" #define ZPOOL_CONFIG_DTL "DTL" #define ZPOOL_CONFIG_STATS "stats" #define ZPOOL_CONFIG_WHOLE_DISK "whole_disk" #define ZPOOL_CONFIG_ERRCOUNT "error_count" #define ZPOOL_CONFIG_NOT_PRESENT "not_present" #define ZPOOL_CONFIG_SPARES "spares" #define ZPOOL_CONFIG_IS_SPARE "is_spare" #define ZPOOL_CONFIG_NPARITY "nparity" #define ZPOOL_CONFIG_HOSTID "hostid" #define ZPOOL_CONFIG_HOSTNAME "hostname" #define ZPOOL_CONFIG_IS_LOG "is_log" #define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */ #define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read" #define ZPOOL_CONFIG_VDEV_CHILDREN "vdev_children" /* * The persistent vdev state is stored as separate values rather than a single * 'vdev_state' entry. This is because a device can be in multiple states, such * as offline and degraded. */ #define ZPOOL_CONFIG_OFFLINE "offline" #define ZPOOL_CONFIG_FAULTED "faulted" #define ZPOOL_CONFIG_DEGRADED "degraded" #define ZPOOL_CONFIG_REMOVED "removed" #define ZPOOL_CONFIG_FRU "fru" #define ZPOOL_CONFIG_AUX_STATE "aux_state" #define VDEV_TYPE_ROOT "root" #define VDEV_TYPE_MIRROR "mirror" #define VDEV_TYPE_REPLACING "replacing" #define VDEV_TYPE_RAIDZ "raidz" #define VDEV_TYPE_DISK "disk" #define VDEV_TYPE_FILE "file" #define VDEV_TYPE_MISSING "missing" #define VDEV_TYPE_HOLE "hole" #define VDEV_TYPE_SPARE "spare" #define VDEV_TYPE_LOG "log" #define VDEV_TYPE_L2CACHE "l2cache" #define VDEV_TYPE_INDIRECT "indirect" /* * This is needed in userland to report the minimum necessary device size. */ #define SPA_MINDEVSIZE (64ULL << 20) /* * The location of the pool configuration repository, shared between kernel and * userland. */ #define ZPOOL_CACHE "/boot/zfs/zpool.cache" /* * vdev states are ordered from least to most healthy. * A vdev that's CANT_OPEN or below is considered unusable. */ typedef enum vdev_state { VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */ VDEV_STATE_CLOSED, /* Not currently open */ VDEV_STATE_OFFLINE, /* Not allowed to open */ VDEV_STATE_REMOVED, /* Explicitly removed from system */ VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */ VDEV_STATE_FAULTED, /* External request to fault device */ VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */ VDEV_STATE_HEALTHY /* Presumed good */ } vdev_state_t; /* * vdev aux states. When a vdev is in the CANT_OPEN state, the aux field * of the vdev stats structure uses these constants to distinguish why. 
*/ typedef enum vdev_aux { VDEV_AUX_NONE, /* no error */ VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */ VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */ VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */ VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */ VDEV_AUX_TOO_SMALL, /* vdev size is too small */ VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */ VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */ VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */ VDEV_AUX_SPARED /* hot spare used in another pool */ } vdev_aux_t; /* * pool state. The following states are written to disk as part of the normal * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE. The remaining states are * software abstractions used at various levels to communicate pool state. */ typedef enum pool_state { POOL_STATE_ACTIVE = 0, /* In active use */ POOL_STATE_EXPORTED, /* Explicitly exported */ POOL_STATE_DESTROYED, /* Explicitly destroyed */ POOL_STATE_SPARE, /* Reserved for hot spare use */ POOL_STATE_UNINITIALIZED, /* Internal spa_t state */ POOL_STATE_UNAVAIL, /* Internal libzfs state */ POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */ } pool_state_t; /* * The uberblock version is incremented whenever an incompatible on-disk * format change is made to the SPA, DMU, or ZAP. * * Note: the first two fields should never be moved. When a storage pool * is opened, the uberblock must be read off the disk before the version * can be checked. If the ub_version field is moved, we may not detect * version mismatch. If the ub_magic field is moved, applications that * expect the magic number in the first word won't work. */ #define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */ #define UBERBLOCK_SHIFT 10 /* up to 1K */ #define MMP_MAGIC 0xa11cea11 /* all-see-all */ #define MMP_INTERVAL_VALID_BIT 0x01 #define MMP_SEQ_VALID_BIT 0x02 #define MMP_FAIL_INT_VALID_BIT 0x04 #define MMP_VALID(ubp) (ubp->ub_magic == UBERBLOCK_MAGIC && \ ubp->ub_mmp_magic == MMP_MAGIC) #define MMP_INTERVAL_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ MMP_INTERVAL_VALID_BIT)) #define MMP_SEQ_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ MMP_SEQ_VALID_BIT)) #define MMP_FAIL_INT_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ MMP_FAIL_INT_VALID_BIT)) #define MMP_INTERVAL(ubp) ((ubp->ub_mmp_config & 0x00000000FFFFFF00) \ >> 8) #define MMP_SEQ(ubp) ((ubp->ub_mmp_config & 0x0000FFFF00000000) \ >> 32) #define MMP_FAIL_INT(ubp) ((ubp->ub_mmp_config & 0xFFFF000000000000) \ >> 48) typedef struct uberblock { uint64_t ub_magic; /* UBERBLOCK_MAGIC */ uint64_t ub_version; /* SPA_VERSION */ uint64_t ub_txg; /* txg of last sync */ uint64_t ub_guid_sum; /* sum of all vdev guids */ uint64_t ub_timestamp; /* UTC time of last sync */ blkptr_t ub_rootbp; /* MOS objset_phys_t */ /* highest SPA_VERSION supported by software that wrote this txg */ uint64_t ub_software_version; /* Maybe missing in uberblocks we read, but always written */ uint64_t ub_mmp_magic; /* * If ub_mmp_delay == 0 and ub_mmp_magic is valid, MMP is off. * Otherwise, nanosec since last MMP write. */ uint64_t ub_mmp_delay; /* * The ub_mmp_config contains the multihost write interval, multihost * fail intervals, sequence number for sub-second granularity, and * valid bit mask. 
This layout is as follows: * * 64 56 48 40 32 24 16 8 0 * +-------+-------+-------+-------+-------+-------+-------+-------+ * 0 | Fail Intervals| Seq | Write Interval (ms) | VALID | * +-------+-------+-------+-------+-------+-------+-------+-------+ * * This allows a write_interval of (2^24/1000)s, over 4.5 hours * * VALID Bits: * - 0x01 - Write Interval (ms) * - 0x02 - Sequence number exists * - 0x04 - Fail Intervals * - 0xf8 - Reserved */ uint64_t ub_mmp_config; /* * ub_checkpoint_txg indicates two things about the current uberblock: * * 1] If it is not zero then this uberblock is a checkpoint. If it is * zero, then this uberblock is not a checkpoint. * * 2] On checkpointed uberblocks, the value of ub_checkpoint_txg is * the ub_txg that the uberblock had at the time we moved it to * the MOS config. * * The field is set when we checkpoint the uberblock and continues to * hold that value even after we've rewound (unlike the ub_txg that * is reset to a higher value). * * Besides checks used to determine whether we are reopening the * pool from a checkpointed uberblock [see spa_ld_select_uberblock()], * the value of the field is used to determine which ZIL blocks have * been allocated according to the ms_sm when we are rewinding to a * checkpoint. Specifically, if blk_birth > ub_checkpoint_txg, then * the ZIL block is not allocated [see uses of spa_min_claim_txg()]. */ uint64_t ub_checkpoint_txg; } uberblock_t; /* * Flags. */ #define DNODE_MUST_BE_ALLOCATED 1 #define DNODE_MUST_BE_FREE 2 /* * Fixed constants. */ #define DNODE_SHIFT 9 /* 512 bytes */ #define DN_MIN_INDBLKSHIFT 12 /* 4k */ #define DN_MAX_INDBLKSHIFT 17 /* 128k */ #define DNODE_BLOCK_SHIFT 14 /* 16k */ #define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */ #define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */ #define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */ /* * Derived constants. */ #define DNODE_MIN_SIZE (1 << DNODE_SHIFT) #define DNODE_MAX_SIZE (1 << DNODE_BLOCK_SHIFT) #define DNODE_BLOCK_SIZE (1 << DNODE_BLOCK_SHIFT) #define DNODE_MIN_SLOTS (DNODE_MIN_SIZE >> DNODE_SHIFT) #define DNODE_MAX_SLOTS (DNODE_MAX_SIZE >> DNODE_SHIFT) #define DN_BONUS_SIZE(dnsize) ((dnsize) - DNODE_CORE_SIZE - \ (1 << SPA_BLKPTRSHIFT)) #define DN_SLOTS_TO_BONUSLEN(slots) DN_BONUS_SIZE((slots) << DNODE_SHIFT) #define DN_OLD_MAX_BONUSLEN (DN_BONUS_SIZE(DNODE_MIN_SIZE)) #define DN_MAX_NBLKPTR ((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> \ SPA_BLKPTRSHIFT) #define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT) #define DN_ZERO_BONUSLEN (DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1) #define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT) #define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT) #define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT) /* The +2 here is a cheesy way to round up */ #define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \ (DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT))) #define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \ (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t)))) #define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \ (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT) #define EPB(blkshift, typeshift) (1 << (blkshift - typeshift)) /* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */ #define DNODE_FLAG_USED_BYTES (1<<0) #define DNODE_FLAG_USERUSED_ACCOUNTED (1<<1) /* Does dnode have a SA spill blkptr in bonus? 
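 * When set, the spill block pointer occupies the last sizeof (blkptr_t)
 * bytes (1 << SPA_BLKPTRSHIFT) of the dnode's tail region and is located
 * with DN_SPILL_BLKPTR() below.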
*/ #define DNODE_FLAG_SPILL_BLKPTR (1<<2) typedef struct dnode_phys { uint8_t dn_type; /* dmu_object_type_t */ uint8_t dn_indblkshift; /* ln2(indirect block size) */ uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */ uint8_t dn_nblkptr; /* length of dn_blkptr */ uint8_t dn_bonustype; /* type of data in bonus buffer */ uint8_t dn_checksum; /* ZIO_CHECKSUM type */ uint8_t dn_compress; /* ZIO_COMPRESS type */ uint8_t dn_flags; /* DNODE_FLAG_* */ uint16_t dn_datablkszsec; /* data block size in 512b sectors */ uint16_t dn_bonuslen; /* length of dn_bonus */ uint8_t dn_extra_slots; /* # of subsequent slots consumed */ uint8_t dn_pad2[3]; /* accounting is protected by dn_dirty_mtx */ uint64_t dn_maxblkid; /* largest allocated block ID */ uint64_t dn_used; /* bytes (or sectors) of disk space */ uint64_t dn_pad3[4]; /* * The tail region is 448 bytes for a 512 byte dnode, and * correspondingly larger for larger dnode sizes. The spill * block pointer, when present, is always at the end of the tail * region. There are three ways this space may be used, using * a 512 byte dnode for this diagram: * * 0 64 128 192 256 320 384 448 (offset) * +---------------+---------------+---------------+-------+ * | dn_blkptr[0] | dn_blkptr[1] | dn_blkptr[2] | / | * +---------------+---------------+---------------+-------+ * | dn_blkptr[0] | dn_bonus[0..319] | * +---------------+-----------------------+---------------+ * | dn_blkptr[0] | dn_bonus[0..191] | dn_spill | * +---------------+-----------------------+---------------+ */ union { blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)]; struct { blkptr_t __dn_ignore1; uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN]; }; struct { blkptr_t __dn_ignore2; uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN - sizeof (blkptr_t)]; blkptr_t dn_spill; }; }; } dnode_phys_t; #define DN_SPILL_BLKPTR(dnp) (blkptr_t *)((char *)(dnp) + \ (((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT)) typedef enum dmu_object_byteswap { DMU_BSWAP_UINT8, DMU_BSWAP_UINT16, DMU_BSWAP_UINT32, DMU_BSWAP_UINT64, DMU_BSWAP_ZAP, DMU_BSWAP_DNODE, DMU_BSWAP_OBJSET, DMU_BSWAP_ZNODE, DMU_BSWAP_OLDACL, DMU_BSWAP_ACL, /* * Allocating a new byteswap type number makes the on-disk format * incompatible with any other format that uses the same number. * * Data can usually be structured to work with one of the * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types. */ DMU_BSWAP_NUMFUNCS } dmu_object_byteswap_t; #define DMU_OT_NEWTYPE 0x80 #define DMU_OT_METADATA 0x40 #define DMU_OT_BYTESWAP_MASK 0x3f /* * Defines a uint8_t object type. Object types specify if the data * in the object is metadata (boolean) and how to byteswap the data * (dmu_object_byteswap_t). */ #define DMU_OT(byteswap, metadata) \ (DMU_OT_NEWTYPE | \ ((metadata) ? 
DMU_OT_METADATA : 0) | \ ((byteswap) & DMU_OT_BYTESWAP_MASK)) typedef enum dmu_object_type { DMU_OT_NONE, /* general: */ DMU_OT_OBJECT_DIRECTORY, /* ZAP */ DMU_OT_OBJECT_ARRAY, /* UINT64 */ DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */ DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */ DMU_OT_BPLIST, /* UINT64 */ DMU_OT_BPLIST_HDR, /* UINT64 */ /* spa: */ DMU_OT_SPACE_MAP_HEADER, /* UINT64 */ DMU_OT_SPACE_MAP, /* UINT64 */ /* zil: */ DMU_OT_INTENT_LOG, /* UINT64 */ /* dmu: */ DMU_OT_DNODE, /* DNODE */ DMU_OT_OBJSET, /* OBJSET */ /* dsl: */ DMU_OT_DSL_DIR, /* UINT64 */ DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */ DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */ DMU_OT_DSL_PROPS, /* ZAP */ DMU_OT_DSL_DATASET, /* UINT64 */ /* zpl: */ DMU_OT_ZNODE, /* ZNODE */ DMU_OT_OLDACL, /* Old ACL */ DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */ DMU_OT_DIRECTORY_CONTENTS, /* ZAP */ DMU_OT_MASTER_NODE, /* ZAP */ DMU_OT_UNLINKED_SET, /* ZAP */ /* zvol: */ DMU_OT_ZVOL, /* UINT8 */ DMU_OT_ZVOL_PROP, /* ZAP */ /* other; for testing only! */ DMU_OT_PLAIN_OTHER, /* UINT8 */ DMU_OT_UINT64_OTHER, /* UINT64 */ DMU_OT_ZAP_OTHER, /* ZAP */ /* new object types: */ DMU_OT_ERROR_LOG, /* ZAP */ DMU_OT_SPA_HISTORY, /* UINT8 */ DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */ DMU_OT_POOL_PROPS, /* ZAP */ DMU_OT_DSL_PERMS, /* ZAP */ DMU_OT_ACL, /* ACL */ DMU_OT_SYSACL, /* SYSACL */ DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */ DMU_OT_FUID_SIZE, /* FUID table size UINT64 */ DMU_OT_NEXT_CLONES, /* ZAP */ DMU_OT_SCAN_QUEUE, /* ZAP */ DMU_OT_USERGROUP_USED, /* ZAP */ DMU_OT_USERGROUP_QUOTA, /* ZAP */ DMU_OT_USERREFS, /* ZAP */ DMU_OT_DDT_ZAP, /* ZAP */ DMU_OT_DDT_STATS, /* ZAP */ DMU_OT_SA, /* System attr */ DMU_OT_SA_MASTER_NODE, /* ZAP */ DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */ DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */ DMU_OT_SCAN_XLATE, /* ZAP */ DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */ DMU_OT_NUMTYPES, /* * Names for valid types declared with DMU_OT(). */ DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE), DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE), DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE), DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE), DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE), DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE), DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE), DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE), DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE), DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE) } dmu_object_type_t; typedef enum dmu_objset_type { DMU_OST_NONE, DMU_OST_META, DMU_OST_ZFS, DMU_OST_ZVOL, DMU_OST_OTHER, /* For testing only! */ DMU_OST_ANY, /* Be careful! */ DMU_OST_NUMTYPES } dmu_objset_type_t; #define ZAP_MAXVALUELEN (1024 * 8) /* * header for all bonus and spill buffers. * The header has a fixed portion with a variable number * of "lengths" depending on the number of variable sized * attribues which are determined by the "layout number" */ #define SA_MAGIC 0x2F505A /* ZFS SA */ typedef struct sa_hdr_phys { uint32_t sa_magic; uint16_t sa_layout_info; /* Encoded with hdrsize and layout number */ uint16_t sa_lengths[1]; /* optional sizes for variable length attrs */ /* ... Data follows the lengths. */ } sa_hdr_phys_t; /* * sa_hdr_phys -> sa_layout_info * * 16 10 0 * +--------+-------+ * | hdrsz |layout | * +--------+-------+ * * Bits 0-10 are the layout number * Bits 11-16 are the size of the header. * The hdrsize is the number * 8 * * For example. 
* hdrsz of 1 ==> 8 byte header * 2 ==> 16 byte header * */ #define SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10) #define SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0) #define SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \ { \ BF32_SET_SB(x, 10, 6, 3, 0, size); \ BF32_SET(x, 0, 10, num); \ } #define SA_MODE_OFFSET 0 #define SA_SIZE_OFFSET 8 #define SA_GEN_OFFSET 16 #define SA_UID_OFFSET 24 #define SA_GID_OFFSET 32 #define SA_PARENT_OFFSET 40 #define SA_SYMLINK_OFFSET 160 #define ZIO_OBJSET_MAC_LEN 32 /* * Intent log header - this on disk structure holds fields to manage * the log. All fields are 64 bit to easily handle cross architectures. */ typedef struct zil_header { uint64_t zh_claim_txg; /* txg in which log blocks were claimed */ uint64_t zh_replay_seq; /* highest replayed sequence number */ blkptr_t zh_log; /* log chain */ uint64_t zh_claim_seq; /* highest claimed sequence number */ uint64_t zh_pad[5]; } zil_header_t; #define OBJSET_PHYS_SIZE_V2 2048 #define OBJSET_PHYS_SIZE_V3 4096 typedef struct objset_phys { dnode_phys_t os_meta_dnode; zil_header_t os_zil_header; uint64_t os_type; uint64_t os_flags; uint8_t os_portable_mac[ZIO_OBJSET_MAC_LEN]; uint8_t os_local_mac[ZIO_OBJSET_MAC_LEN]; char os_pad0[OBJSET_PHYS_SIZE_V2 - sizeof (dnode_phys_t)*3 - sizeof (zil_header_t) - sizeof (uint64_t)*2 - 2*ZIO_OBJSET_MAC_LEN]; dnode_phys_t os_userused_dnode; dnode_phys_t os_groupused_dnode; dnode_phys_t os_projectused_dnode; char os_pad1[OBJSET_PHYS_SIZE_V3 - OBJSET_PHYS_SIZE_V2 - sizeof (dnode_phys_t)]; } objset_phys_t; typedef struct dsl_dir_phys { uint64_t dd_creation_time; /* not actually used */ uint64_t dd_head_dataset_obj; uint64_t dd_parent_obj; uint64_t dd_clone_parent_obj; uint64_t dd_child_dir_zapobj; /* * how much space our children are accounting for; for leaf * datasets, == physical space used by fs + snaps */ uint64_t dd_used_bytes; uint64_t dd_compressed_bytes; uint64_t dd_uncompressed_bytes; /* Administrative quota setting */ uint64_t dd_quota; /* Administrative reservation setting */ uint64_t dd_reserved; uint64_t dd_props_zapobj; uint64_t dd_pad[21]; /* pad out to 256 bytes for good measure */ } dsl_dir_phys_t; typedef struct dsl_dataset_phys { uint64_t ds_dir_obj; uint64_t ds_prev_snap_obj; uint64_t ds_prev_snap_txg; uint64_t ds_next_snap_obj; uint64_t ds_snapnames_zapobj; /* zap obj of snaps; ==0 for snaps */ uint64_t ds_num_children; /* clone/snap children; ==0 for head */ uint64_t ds_creation_time; /* seconds since 1970 */ uint64_t ds_creation_txg; uint64_t ds_deadlist_obj; uint64_t ds_used_bytes; uint64_t ds_compressed_bytes; uint64_t ds_uncompressed_bytes; uint64_t ds_unique_bytes; /* only relevant to snapshots */ /* * The ds_fsid_guid is a 56-bit ID that can change to avoid * collisions. The ds_guid is a 64-bit ID that will never * change, so there is a small probability that it will collide. */ uint64_t ds_fsid_guid; uint64_t ds_guid; uint64_t ds_flags; blkptr_t ds_bp; uint64_t ds_pad[8]; /* pad out to 320 bytes for good measure */ } dsl_dataset_phys_t; /* * The names of zap entries in the DIRECTORY_OBJECT of the MOS. 
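 * For example, the pool's packed nvlist config is found by looking up
 * DMU_POOL_CONFIG ("config") in object DMU_POOL_DIRECTORY_OBJECT of the MOS.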
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPLIST		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"
#define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
#define	DMU_POOL_REMOVING		"com.delphix:removing"
#define	DMU_POOL_OBSOLETE_BPOBJ		"com.delphix:obsolete_bpobj"
#define	DMU_POOL_CONDENSING_INDIRECT	"com.delphix:condensing_indirect"

#define	ZAP_MAGIC 0x2F52AB2ABULL
#define	FZAP_BLOCK_SHIFT(zap)	((zap)->zap_block_shift)

#define	ZAP_MAXCD		(uint32_t)(-1)
#define	ZAP_HASHBITS		28
#define	MZAP_ENT_LEN		64
#define	MZAP_NAME_LEN		(MZAP_ENT_LEN - 8 - 4 - 2)
#define	MZAP_MAX_BLKSZ		SPA_OLD_MAXBLOCKSIZE

typedef struct mzap_ent_phys {
	uint64_t mze_value;
	uint32_t mze_cd;
	uint16_t mze_pad;	/* in case we want to chain them someday */
	char mze_name[MZAP_NAME_LEN];
} mzap_ent_phys_t;

typedef struct mzap_phys {
	uint64_t mz_block_type;	/* ZBT_MICRO */
	uint64_t mz_salt;
	uint64_t mz_normflags;
	uint64_t mz_pad[5];
	mzap_ent_phys_t mz_chunk[1];
	/* actually variable size depending on block size */
} mzap_phys_t;

/*
 * The (fat) zap is stored in one object. It is an array of
 * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
 *
 * ptrtbl fits in first block:
 *	[zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
 *
 * ptrtbl too big for first block:
 *	[zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
 *
 */
#define	ZBT_LEAF		((1ULL << 63) + 0)
#define	ZBT_HEADER		((1ULL << 63) + 1)
#define	ZBT_MICRO		((1ULL << 63) + 3)
/* any other values are ptrtbl blocks */

/*
 * the embedded pointer table takes up half a block:
 * block size / entry size (2^3) / 2
 */
#define	ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)

/*
 * The embedded pointer table starts half-way through the block. Since
 * the pointer table itself is half the block, it starts at (64-bit)
 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
 */
#define	ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
	((uint64_t *)(zap)->zap_phys) \
	[(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]

#define	ZAP_LEAF_MAGIC 0x2AB1EAF

/* chunk size = 24 bytes */
#define	ZAP_LEAF_CHUNKSIZE 24

/*
 * The amount of space available for chunks is:
 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
 * entries - header space (2*chunksize)
 */
#define	ZAP_LEAF_NUMCHUNKS(l) \
	(((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
	ZAP_LEAF_CHUNKSIZE - 2)

/*
 * The amount of space within the chunk available for the array is:
 * chunk size - space for type (1) - space for next pointer (2)
 */
#define	ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)

#define	ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
	(((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)

/*
 * Low water mark: when there are only this many chunks free, start
 * growing the ptrtbl. Ideally, this should be larger than a
 * "reasonably-sized" entry. 20 chunks is more than enough for the
 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
 * while still being only around 3% for 16k blocks.
 */
#define	ZAP_LEAF_LOW_WATER (20)

/*
 * The leaf hash table has block size / 2^5 (32) number of entries,
 * which should be more than enough for the maximum number of entries,
 * which is less than block size / CHUNKSIZE (24) / minimum number of
 * chunks per entry (3).
 */
#define	ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
#define	ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))

/*
 * The chunks start immediately after the hash table. The end of the
 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
 * chunk_t.
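 * Put differently, chunk storage begins 2 * ZAP_LEAF_HASH_NUMENTRIES(l)
 * bytes past l_hash, and ZAP_LEAF_CHUNK() below indexes it as an array of
 * 24-byte zap_leaf_chunk_t structures.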
*/ #define ZAP_LEAF_CHUNK(l, idx) \ ((zap_leaf_chunk_t *) \ ((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx] #define ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry) typedef enum zap_chunk_type { ZAP_CHUNK_FREE = 253, ZAP_CHUNK_ENTRY = 252, ZAP_CHUNK_ARRAY = 251, ZAP_CHUNK_TYPE_MAX = 250 } zap_chunk_type_t; /* * TAKE NOTE: * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified. */ typedef struct zap_leaf_phys { struct zap_leaf_header { uint64_t lh_block_type; /* ZBT_LEAF */ uint64_t lh_pad1; uint64_t lh_prefix; /* hash prefix of this leaf */ uint32_t lh_magic; /* ZAP_LEAF_MAGIC */ uint16_t lh_nfree; /* number free chunks */ uint16_t lh_nentries; /* number of entries */ uint16_t lh_prefix_len; /* num bits used to id this */ /* above is accessable to zap, below is zap_leaf private */ uint16_t lh_freelist; /* chunk head of free list */ uint8_t lh_pad2[12]; } l_hdr; /* 2 24-byte chunks */ /* * The header is followed by a hash table with * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. The hash table is * followed by an array of ZAP_LEAF_NUMCHUNKS(zap) * zap_leaf_chunk structures. These structures are accessed * with the ZAP_LEAF_CHUNK() macro. */ uint16_t l_hash[1]; } zap_leaf_phys_t; typedef union zap_leaf_chunk { struct zap_leaf_entry { uint8_t le_type; /* always ZAP_CHUNK_ENTRY */ uint8_t le_value_intlen; /* size of ints */ uint16_t le_next; /* next entry in hash chain */ uint16_t le_name_chunk; /* first chunk of the name */ uint16_t le_name_numints; /* bytes in name, incl null */ uint16_t le_value_chunk; /* first chunk of the value */ uint16_t le_value_numints; /* value length in ints */ uint32_t le_cd; /* collision differentiator */ uint64_t le_hash; /* hash value of the name */ } l_entry; struct zap_leaf_array { uint8_t la_type; /* always ZAP_CHUNK_ARRAY */ uint8_t la_array[ZAP_LEAF_ARRAY_BYTES]; uint16_t la_next; /* next blk or CHAIN_END */ } l_array; struct zap_leaf_free { uint8_t lf_type; /* always ZAP_CHUNK_FREE */ uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES]; uint16_t lf_next; /* next in free list, or CHAIN_END */ } l_free; } zap_leaf_chunk_t; typedef struct zap_leaf { int l_bs; /* block size shift */ zap_leaf_phys_t *l_phys; } zap_leaf_t; /* * Define special zfs pflags */ #define ZFS_XATTR 0x1 /* is an extended attribute */ #define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */ #define ZFS_ACL_TRIVIAL 0x4 /* files ACL is trivial */ #define MASTER_NODE_OBJ 1 /* * special attributes for master node. */ #define ZFS_FSID "FSID" #define ZFS_UNLINKED_SET "DELETE_QUEUE" #define ZFS_ROOT_OBJ "ROOT" #define ZPL_VERSION_OBJ "VERSION" #define ZFS_PROP_BLOCKPERPAGE "BLOCKPERPAGE" #define ZFS_PROP_NOGROWBLOCKS "NOGROWBLOCKS" #define ZFS_FLAG_BLOCKPERPAGE 0x1 #define ZFS_FLAG_NOGROWBLOCKS 0x2 /* * ZPL version - rev'd whenever an incompatible on-disk format change * occurs. Independent of SPA/DMU/ZAP versioning. */ #define ZPL_VERSION 1ULL /* * The directory entry has the type (currently unused on Solaris) in the * top 4 bits, and the object number in the low 48 bits. The "middle" * 12 bits are unused. */ #define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4) #define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48) #define ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj) typedef struct ace { uid_t a_who; /* uid or gid */ uint32_t a_access_mask; /* read,write,... 
*/ uint16_t a_flags; /* see below */ uint16_t a_type; /* allow or deny */ } ace_t; #define ACE_SLOT_CNT 6 typedef struct zfs_znode_acl { uint64_t z_acl_extern_obj; /* ext acl pieces */ uint32_t z_acl_count; /* Number of ACEs */ uint16_t z_acl_version; /* acl version */ uint16_t z_acl_pad; /* pad */ ace_t z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */ } zfs_znode_acl_t; /* * This is the persistent portion of the znode. It is stored * in the "bonus buffer" of the file. Short symbolic links * are also stored in the bonus buffer. */ typedef struct znode_phys { uint64_t zp_atime[2]; /* 0 - last file access time */ uint64_t zp_mtime[2]; /* 16 - last file modification time */ uint64_t zp_ctime[2]; /* 32 - last file change time */ uint64_t zp_crtime[2]; /* 48 - creation time */ uint64_t zp_gen; /* 64 - generation (txg of creation) */ uint64_t zp_mode; /* 72 - file mode bits */ uint64_t zp_size; /* 80 - size of file */ uint64_t zp_parent; /* 88 - directory parent (`..') */ uint64_t zp_links; /* 96 - number of links to file */ uint64_t zp_xattr; /* 104 - DMU object for xattrs */ uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */ uint64_t zp_flags; /* 120 - persistent flags */ uint64_t zp_uid; /* 128 - file owner */ uint64_t zp_gid; /* 136 - owning group */ uint64_t zp_pad[4]; /* 144 - future */ zfs_znode_acl_t zp_acl; /* 176 - 263 ACL */ /* * Data may pad out any remaining bytes in the znode buffer, eg: * * |<---------------------- dnode_phys (512) ------------------------>| * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->| * |<---- znode (264) ---->|<---- data (56) ---->| * * At present, we only use this space to store symbolic links. */ } znode_phys_t; /* * In-core vdev representation. */ struct vdev; struct spa; typedef int vdev_phys_read_t(struct vdev *vdev, void *priv, off_t offset, void *buf, size_t bytes); typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp, void *buf, off_t offset, size_t bytes); typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t; typedef struct vdev_indirect_mapping_entry_phys { /* * Decode with DVA_MAPPING_* macros. * Contains: * the source offset (low 63 bits) * the one-bit "mark", used for garbage collection (by zdb) */ uint64_t vimep_src; /* * Note: the DVA's asize is 24 bits, and can thus store ranges * up to 8GB. */ dva_t vimep_dst; } vdev_indirect_mapping_entry_phys_t; #define DVA_MAPPING_GET_SRC_OFFSET(vimep) \ BF64_GET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0) #define DVA_MAPPING_SET_SRC_OFFSET(vimep, x) \ BF64_SET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0, x) typedef struct vdev_indirect_mapping_entry { vdev_indirect_mapping_entry_phys_t vime_mapping; uint32_t vime_obsolete_count; list_node_t vime_node; } vdev_indirect_mapping_entry_t; /* * This is stored in the bonus buffer of the mapping object, see comment of * vdev_indirect_config for more details. */ typedef struct vdev_indirect_mapping_phys { uint64_t vimp_max_offset; uint64_t vimp_bytes_mapped; uint64_t vimp_num_entries; /* number of v_i_m_entry_phys_t's */ /* * For each entry in the mapping object, this object contains an * entry representing the number of bytes of that mapping entry * that were no longer in use by the pool at the time this indirect * vdev was last condensed. */ uint64_t vimp_counts_object; } vdev_indirect_mapping_phys_t; #define VDEV_INDIRECT_MAPPING_SIZE_V0 (3 * sizeof (uint64_t)) typedef struct vdev_indirect_mapping { uint64_t vim_object; boolean_t vim_havecounts; /* vim_entries segment offset currently in memory. 
*/ uint64_t vim_entry_offset; /* vim_entries segment size. */ size_t vim_num_entries; /* Needed by dnode_read() */ const void *vim_spa; dnode_phys_t *vim_dn; /* * An ordered array of mapping entries, sorted by source offset. * Note that vim_entries is needed during a removal (and contains * mappings that have been synced to disk so far) to handle frees * from the removing device. */ vdev_indirect_mapping_entry_phys_t *vim_entries; objset_phys_t *vim_objset; vdev_indirect_mapping_phys_t *vim_phys; } vdev_indirect_mapping_t; /* * On-disk indirect vdev state. * * An indirect vdev is described exclusively in the MOS config of a pool. * The config for an indirect vdev includes several fields, which are * accessed in memory by a vdev_indirect_config_t. */ typedef struct vdev_indirect_config { /* * Object (in MOS) which contains the indirect mapping. This object * contains an array of vdev_indirect_mapping_entry_phys_t ordered by * vimep_src. The bonus buffer for this object is a * vdev_indirect_mapping_phys_t. This object is allocated when a vdev * removal is initiated. * * Note that this object can be empty if none of the data on the vdev * has been copied yet. */ uint64_t vic_mapping_object; /* * Object (in MOS) which contains the birth times for the mapping * entries. This object contains an array of * vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus * buffer for this object is a vdev_indirect_birth_phys_t. This object * is allocated when a vdev removal is initiated. * * Note that this object can be empty if none of the vdev has yet been * copied. */ uint64_t vic_births_object; /* * This is the vdev ID which was removed previous to this vdev, or * UINT64_MAX if there are no previously removed vdevs. */ uint64_t vic_prev_indirect_vdev; } vdev_indirect_config_t; typedef struct vdev { STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */ STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */ vdev_list_t v_children; /* children of this vdev */ const char *v_name; /* vdev name */ uint64_t v_guid; /* vdev guid */ uint64_t v_id; /* index in parent */ uint64_t v_psize; /* physical device capacity */ int v_ashift; /* offset to block shift */ int v_nparity; /* # parity for raidz */ struct vdev *v_top; /* parent vdev */ size_t v_nchildren; /* # children */ vdev_state_t v_state; /* current state */ vdev_phys_read_t *v_phys_read; /* read from raw leaf vdev */ vdev_read_t *v_read; /* read from vdev */ void *v_read_priv; /* private data for read function */ boolean_t v_islog; struct spa *v_spa; /* link to spa */ /* * Values stored in the config for an indirect or removing vdev. */ vdev_indirect_config_t vdev_indirect_config; vdev_indirect_mapping_t *v_mapping; } vdev_t; /* * In-core pool representation. */ typedef STAILQ_HEAD(spa_list, spa) spa_list_t; typedef struct spa { STAILQ_ENTRY(spa) spa_link; /* link in global pool list */ char *spa_name; /* pool name */ uint64_t spa_guid; /* pool guid */ uint64_t spa_txg; /* most recent transaction */ struct uberblock spa_uberblock; /* best uberblock so far */ vdev_t *spa_root_vdev; /* toplevel vdev container */ objset_phys_t spa_mos; /* MOS for this pool */ zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */ void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS]; boolean_t spa_with_log; /* this pool has log */ } spa_t; /* IO related arguments. 
*/ typedef struct zio { spa_t *io_spa; blkptr_t *io_bp; void *io_data; uint64_t io_size; uint64_t io_offset; /* Stuff for the vdev stack */ vdev_t *io_vd; void *io_vsd; int io_error; } zio_t; static void decode_embedded_bp_compressed(const blkptr_t *, void *); + +#endif /* _ZFSIMPL_H_ */
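
As a rough, illustrative sketch (not part of this commit): with the new
vdev_boot_envblock_t in place, the boot environment area is simply the second
8K pad of each vdev_label_t, at byte offset VDEV_PAD_SIZE within the label. A
consumer that has already read a full 256K label into memory could locate and
sanity-check it along these lines; the helper name and the printf() reporting
are hypothetical.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: assumes the vdev_label_t / vdev_boot_envblock_t definitions
 * above are in scope and that "label" points at one complete label copied
 * from disk, with vbe_version already in host byte order.
 */
static const vdev_boot_envblock_t *
label_get_bootenv(const vdev_label_t *label)
{
	/* vl_be replaces the old vl_pad2, VDEV_PAD_SIZE bytes into the label. */
	const vdev_boot_envblock_t *be = &label->vl_be;

	switch (be->vbe_version) {
	case VB_RAW:		/* vbe_bootenv holds plain ASCII text */
	case VB_NVLIST:		/* vbe_bootenv holds a packed nvlist */
		return (be);
	default:
		printf("unsupported bootenv version %ju\n",
		    (uintmax_t)be->vbe_version);
		return (NULL);
	}
}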