diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c index 975ae038cfdf..8d2bc3bca84a 100644 --- a/sys/amd64/amd64/minidump_machdep.c +++ b/sys/amd64/amd64/minidump_machdep.c @@ -1,412 +1,414 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_pmap.h" #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static size_t progress, dumpsize, wdog_next; static int dump_retry_count = 5; SYSCTL_INT(_machdep, OID_AUTO, dump_retry_count, CTLFLAG_RWTUN, &dump_retry_count, 0, "Number of times dump has to retry before bailing out"); static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } /* Pat the watchdog approximately every 128MB of the dump. 
*/ #define WDOG_DUMP_INTERVAL (128 * 1024 * 1024) static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, i, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if ((((uintptr_t)pa) % PAGE_SIZE) != 0) { printf("address not page aligned %p\n", ptr); return (EINVAL); } if (ptr != NULL) { /* If we're doing a virtual dump, flush any pre-existing pa pages */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; progress -= len; dumpsys_pb_progress(len); if (progress <= wdog_next) { wdog_kern_pat(WD_LASTVAL); if (wdog_next > WDOG_DUMP_INTERVAL) wdog_next -= WDOG_DUMP_INTERVAL; else wdog_next = 0; } if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { for (i = 0; i < len; i += PAGE_SIZE) dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT); fragsz += len; pa += len; sz -= len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } /* A fake page table page, to avoid having to handle both 4K and 2M pages */ static pd_entry_t fakepd[NPDEPG]; int cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) { uint32_t pmapsize; vm_offset_t va, kva_end; int error; uint64_t *pml4, *pdp, *pd, *pt, pa; uint64_t pdpe, pde, pte; int ii, j, k, n; int retry_count; struct minidumphdr mdhdr; + struct msgbuf *mbp; retry_count = 0; retry: retry_count++; /* Snapshot the KVA upper bound in case it grows. */ kva_end = MAX(KERNBASE + nkpt * NBPDR, kernel_vm_end); /* * Walk the kernel page table pages, setting the active entries in the * dump bitmap. * * NB: for a live dump, we may be racing with updates to the page * tables, so care must be taken to read each entry only once. */ pmapsize = 0; for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) { /* * We always write a page, even if it is zero. Each * page written corresponds to 1GB of space */ pmapsize += PAGE_SIZE; ii = pmap_pml4e_index(va); pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]); if ((pdpe & PG_V) == 0) { va += NBPDP; continue; } /* * 1GB page is represented as 512 2MB pages in a dump. */ if ((pdpe & PG_PS) != 0) { va += NBPDP; pa = pdpe & PG_PS_FRAME; for (n = 0; n < NPDEPG * NPTEPG; n++) { if (vm_phys_is_dumpable(pa)) dump_add_page(pa); pa += PAGE_SIZE; } continue; } pd = (uint64_t *)PHYS_TO_DMAP(pdpe & PG_FRAME); for (n = 0; n < NPDEPG; n++, va += NBPDR) { pde = atomic_load_64(&pd[pmap_pde_index(va)]); if ((pde & PG_V) == 0) continue; if ((pde & PG_PS) != 0) { /* This is an entire 2M page. 
*/ pa = pde & PG_PS_FRAME; for (k = 0; k < NPTEPG; k++) { if (vm_phys_is_dumpable(pa)) dump_add_page(pa); pa += PAGE_SIZE; } continue; } pa = pde & PG_FRAME; /* set bit for this PTE page */ if (vm_phys_is_dumpable(pa)) dump_add_page(pa); /* and for each valid page in this 2MB block */ pt = (uint64_t *)PHYS_TO_DMAP(pde & PG_FRAME); for (k = 0; k < NPTEPG; k++) { pte = atomic_load_64(&pt[k]); if ((pte & PG_V) == 0) continue; pa = pte & PG_FRAME; if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) dump_add_page(pa); } } } /* Calculate dump size. */ + mbp = state->msgbufp; dumpsize = pmapsize; - dumpsize += round_page(msgbufp->msg_size); + dumpsize += round_page(mbp->msg_size); dumpsize += round_page(sizeof(dump_avail)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) { dumpsize += PAGE_SIZE; } else { dump_drop_page(pa); } } dumpsize += PAGE_SIZE; wdog_next = progress = dumpsize; dumpsys_pb_init(dumpsize); /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; - mdhdr.msgbufsize = msgbufp->msg_size; + mdhdr.msgbufsize = mbp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.pmapsize = pmapsize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; mdhdr.dmapbase = DMAP_MIN_ADDRESS; mdhdr.dmapend = DMAP_MAX_ADDRESS; mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20, ptoa((uintmax_t)physmem) / 1048576); /* Dump my header */ bzero(&fakepd, sizeof(fakepd)); bcopy(&mdhdr, &fakepd, sizeof(mdhdr)); error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ - error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size)); + error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size)); if (error) goto fail; /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(fakepd), "Large dump_avail not handled"); bzero(&fakepd, sizeof(fakepd)); memcpy(fakepd, dump_avail, sizeof(dump_avail)); error = blk_write(di, (char *)fakepd, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page directory pages */ bzero(fakepd, sizeof(fakepd)); for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) { ii = pmap_pml4e_index(va); pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]); /* We always write a page, even if it is zero */ if ((pdpe & PG_V) == 0) { error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakepd in the same block */ error = blk_flush(di); if (error) goto fail; continue; } /* 1GB page is represented as 512 2MB pages in a dump */ if ((pdpe & PG_PS) != 0) { /* PDPE and PDP have identical layout in this case */ fakepd[0] = pdpe; for (j = 1; j < NPDEPG; j++) fakepd[j] = fakepd[j - 1] + NBPDR; error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakepd in the same block */ error = blk_flush(di); if (error) goto fail; bzero(fakepd, sizeof(fakepd)); continue; } pa = pdpe & PG_FRAME; if 
(PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) { pd = (uint64_t *)PHYS_TO_DMAP(pa); error = blk_write(di, (char *)pd, 0, PAGE_SIZE); } else { /* Malformed pa, write the zeroed fakepd. */ error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); } if (error) goto fail; error = blk_flush(di); if (error) goto fail; } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < dump_retry_count) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) { printf("Dump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/arm/arm/minidump_machdep.c b/sys/arm/arm/minidump_machdep.c index e3bf37599d4a..76ce89f895e3 100644 --- a/sys/arm/arm/minidump_machdep.c +++ b/sys/arm/arm/minidump_machdep.c @@ -1,329 +1,330 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/i386/minidump_machdep.c,v 1.6 2008/08/17 23:27:27 */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #ifdef SW_WATCHDOG #include #endif #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. 
*/ static size_t fragsz; static void *dump_va; static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, i, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if (pa != 0) { if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if ((pa & PAGE_MASK) != 0) { printf("address not page aligned\n"); return (EINVAL); } } if (ptr != NULL) { /* Flush any pre-existing pa pages before a virtual dump. */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); #ifdef SW_WATCHDOG wdog_kern_pat(WD_LASTVAL); #endif if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { for (i = 0; i < len; i += PAGE_SIZE) dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT); fragsz += len; pa += len; sz -= len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } /* A buffer for general use. Its size must be one page at least. */ static char dumpbuf[PAGE_SIZE] __aligned(sizeof(uint64_t)); CTASSERT(sizeof(dumpbuf) % sizeof(pt2_entry_t) == 0); int cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) { struct minidumphdr mdhdr; + struct msgbuf *mbp; uint64_t dumpsize, *dump_avail_buf; uint32_t ptesize; uint32_t pa, prev_pa = 0, count = 0; vm_offset_t va, kva_end; int error, i; char *addr; /* * Flush caches. Note that in the SMP case this operates only on the * current CPU's L1 cache. Before we reach this point, code in either * the system shutdown or kernel debugger has called stop_cpus() to stop * all cores other than this one. Part of the ARM handling of * stop_cpus() is to call wbinv_all() on that core's local L1 cache. So * by time we get to here, all that remains is to flush the L1 for the * current CPU, then the L2. */ dcache_wbinv_poc_all(); /* Snapshot the KVA upper bound in case it grows. */ kva_end = kernel_vm_end; /* * Walk the kernel page table pages, setting the active entries in the * dump bitmap. */ ptesize = 0; for (va = KERNBASE; va < kva_end; va += PAGE_SIZE) { pa = pmap_dump_kextract(va, NULL); if (pa != 0 && vm_phys_is_dumpable(pa)) dump_add_page(pa); ptesize += sizeof(pt2_entry_t); } /* Calculate dump size. 
*/ + mbp = state->msgbufp; dumpsize = ptesize; - dumpsize += round_page(msgbufp->msg_size); + dumpsize += round_page(mbp->msg_size); dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (vm_phys_is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); } dumpsize += PAGE_SIZE; dumpsys_pb_init(dumpsize); /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; - mdhdr.msgbufsize = msgbufp->msg_size; + mdhdr.msgbufsize = mbp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.ptesize = ptesize; mdhdr.kernbase = KERNBASE; mdhdr.arch = __ARM_ARCH; mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V6; mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Physical memory: %u MB\n", ptoa((uintmax_t)physmem) / 1048576); printf("Dumping %llu MB:", (long long)dumpsize >> 20); /* Dump my header */ bzero(dumpbuf, sizeof(dumpbuf)); bcopy(&mdhdr, dumpbuf, sizeof(mdhdr)); error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ - error = blk_write(di, (char *)msgbufp->msg_ptr, 0, - round_page(msgbufp->msg_size)); + error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size)); if (error) goto fail; /* Dump dump_avail. Make a copy using 64-bit physical addresses. */ _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <= sizeof(dumpbuf), "Large dump_avail not handled"); bzero(dumpbuf, sizeof(dumpbuf)); dump_avail_buf = (uint64_t *)dumpbuf; for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { dump_avail_buf[i] = dump_avail[i]; dump_avail_buf[i + 1] = dump_avail[i + 1]; } error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ addr = dumpbuf; for (va = KERNBASE; va < kva_end; va += PAGE_SIZE) { pmap_dump_kextract(va, (pt2_entry_t *)addr); addr += sizeof(pt2_entry_t); if (addr == dumpbuf + sizeof(dumpbuf)) { error = blk_write(di, dumpbuf, 0, sizeof(dumpbuf)); if (error != 0) goto fail; addr = dumpbuf; } } if (addr != dumpbuf) { error = blk_write(di, dumpbuf, 0, addr - dumpbuf); if (error != 0) goto fail; } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { if (!count) { prev_pa = pa; count++; } else { if (pa == (prev_pa + count * PAGE_SIZE)) count++; else { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pa = pa; } } } if (count) { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pa = 0; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. 
Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c index 5814cce632a9..5a935d3c379d 100644 --- a/sys/arm64/arm64/minidump_machdep.c +++ b/sys/arm64/arm64/minidump_machdep.c @@ -1,391 +1,392 @@ /*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static size_t dumpsize; static uint64_t tmpbuffer[Ln_ENTRIES]; static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if ((((uintptr_t)pa) % PAGE_SIZE) != 0) { printf("address not page aligned %p\n", ptr); return (EINVAL); } if (ptr != NULL) { /* * If we're doing a virtual dump, flush any * pre-existing pa pages. */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { dump_va = (void *)PHYS_TO_DMAP(pa); fragsz += len; pa += len; sz -= len; error = blk_flush(di); if (error) return (error); } /* Check for user abort. 
 */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

int
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
{
	struct minidumphdr mdhdr;
+	struct msgbuf *mbp;
	pd_entry_t *l0, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_offset_t va, kva_end;
	vm_paddr_t pa;
	uint32_t pmapsize;
	int error, i, j, retry_count;

	retry_count = 0;
retry:
	retry_count++;
	error = 0;
	pmapsize = 0;

	/* Snapshot the KVA upper bound in case it grows. */
	kva_end = kernel_vm_end;

	/*
	 * Walk the kernel page table pages, setting the active entries in the
	 * dump bitmap.
	 *
	 * NB: for a live dump, we may be racing with updates to the page
	 * tables, so care must be taken to read each entry only once.
	 */
	for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) {
		pmapsize += PAGE_SIZE;
		if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3))
			continue;

		l1e = atomic_load_64(l1);
		l2e = atomic_load_64(l2);
		if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) {
			pa = l1e & ~ATTR_MASK;
			for (i = 0; i < Ln_ENTRIES * Ln_ENTRIES;
			    i++, pa += PAGE_SIZE)
				if (vm_phys_is_dumpable(pa))
					dump_add_page(pa);
			pmapsize += (Ln_ENTRIES - 1) * PAGE_SIZE;
			va += L1_SIZE - L2_SIZE;
		} else if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
			pa = l2e & ~ATTR_MASK;
			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
				if (vm_phys_is_dumpable(pa))
					dump_add_page(pa);
			}
		} else if ((l2e & ATTR_DESCR_MASK) == L2_TABLE) {
			for (i = 0; i < Ln_ENTRIES; i++) {
				l3e = atomic_load_64(&l3[i]);
				if ((l3e & ATTR_DESCR_MASK) != L3_PAGE)
					continue;
				pa = l3e & ~ATTR_MASK;
				if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/* Calculate dump size. */
+	mbp = state->msgbufp;
	dumpsize = pmapsize;
-	dumpsize += round_page(msgbufp->msg_size);
+	dumpsize += round_page(mbp->msg_size);
	dumpsize += round_page(sizeof(dump_avail));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(pa) {
		if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			dump_drop_page(pa);
	}
	dumpsize += PAGE_SIZE;

	dumpsys_pb_init(dumpsize);

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
-	mdhdr.msgbufsize = msgbufp->msg_size;
+	mdhdr.msgbufsize = mbp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump my header */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
-	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
-	    round_page(msgbufp->msg_size));
+	error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail */
	_Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
	    "Large dump_avail not handled");
	bzero(tmpbuffer, sizeof(tmpbuffer));
	memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char
*)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page directory pages */ bzero(&tmpbuffer, sizeof(tmpbuffer)); for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) { if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) { /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; continue; } l1e = atomic_load_64(l1); l2e = atomic_load_64(l2); if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { /* * Handle a 1GB block mapping: write out 512 fake L2 * pages. */ pa = (l1e & ~ATTR_MASK) | (va & L1_OFFSET); for (i = 0; i < Ln_ENTRIES; i++) { for (j = 0; j < Ln_ENTRIES; j++) { tmpbuffer[j] = pa + i * L2_SIZE + j * PAGE_SIZE | ATTR_DEFAULT | L3_PAGE; } error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; } /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); va += L1_SIZE - L2_SIZE; } else if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) { pa = (l2e & ~ATTR_MASK) | (va & L2_OFFSET); /* Generate fake l3 entries based upon the l1 entry */ for (i = 0; i < Ln_ENTRIES; i++) { tmpbuffer[i] = pa + (i * PAGE_SIZE) | ATTR_DEFAULT | L3_PAGE; } error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakepd in the same block */ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); continue; } else { pa = l2e & ~ATTR_MASK; /* * We always write a page, even if it is zero. If pa * is malformed, write the zeroed tmpbuffer. */ if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) error = blk_write(di, NULL, pa, PAGE_SIZE); else error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; } } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < 5) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) { printf("Dump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/i386/i386/minidump_machdep_base.c b/sys/i386/i386/minidump_machdep_base.c index 9b036f0fd700..196c375bec56 100644 --- a/sys/i386/i386/minidump_machdep_base.c +++ b/sys/i386/i386/minidump_machdep_base.c @@ -1,346 +1,349 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); #define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK) #define DEV_ALIGN(x) roundup2((off_t)(x), DEV_BSIZE) static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, i, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if (pa != 0 && (((uintptr_t)ptr) % PAGE_SIZE) != 0) { printf("address not page aligned\n"); return (EINVAL); } if (ptr != NULL) { /* If we're doing a virtual dump, flush any pre-existing pa pages */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { for (i = 0; i < len; i += PAGE_SIZE) dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT); fragsz += len; pa += len; sz -= len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } /* A fake page table page, to avoid having to handle both 4K and 2M pages */ static pt_entry_t fakept[NPTEPG]; #ifdef PMAP_PAE_COMP #define cpu_minidumpsys cpu_minidumpsys_pae #define IdlePTD IdlePTD_pae #else #define cpu_minidumpsys cpu_minidumpsys_nopae #define IdlePTD IdlePTD_nopae #endif int cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) { uint64_t dumpsize; uint32_t ptesize; vm_offset_t va, kva_end; int error; uint64_t pa; pd_entry_t *pd, pde; pt_entry_t *pt, pte; int j, k; struct minidumphdr mdhdr; + struct msgbuf *mbp; /* Snapshot the KVA upper bound in case it grows. */ kva_end = kernel_vm_end; /* * Walk the kernel page table pages, setting the active entries in the * dump bitmap. * * NB: for a live dump, we may be racing with updates to the page * tables, so care must be taken to read each entry only once. 
*/ ptesize = 0; for (va = KERNBASE; va < kva_end; va += NBPDR) { /* * We always write a page, even if it is zero. Each * page written corresponds to 2MB of space */ ptesize += PAGE_SIZE; pd = IdlePTD; /* always mapped! */ j = va >> PDRSHIFT; pde = pte_load(&pd[va >> PDRSHIFT]); if ((pde & (PG_PS | PG_V)) == (PG_PS | PG_V)) { /* This is an entire 2M page. */ pa = pde & PG_PS_FRAME; for (k = 0; k < NPTEPG; k++) { if (vm_phys_is_dumpable(pa)) dump_add_page(pa); pa += PAGE_SIZE; } continue; } if ((pde & PG_V) == PG_V) { /* set bit for each valid page in this 2MB block */ pt = pmap_kenter_temporary(pde & PG_FRAME, 0); for (k = 0; k < NPTEPG; k++) { pte = pte_load(&pt[k]); if ((pte & PG_V) == PG_V) { pa = pte & PG_FRAME; if (vm_phys_is_dumpable(pa)) dump_add_page(pa); } } } else { /* nothing, we're going to dump a null page */ } } /* Calculate dump size. */ + mbp = state->msgbufp; dumpsize = ptesize; - dumpsize += round_page(msgbufp->msg_size); + dumpsize += round_page(mbp->msg_size); dumpsize += round_page(sizeof(dump_avail)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (vm_phys_is_dumpable(pa)) { dumpsize += PAGE_SIZE; } else { dump_drop_page(pa); } } dumpsize += PAGE_SIZE; dumpsys_pb_init(dumpsize); /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; - mdhdr.msgbufsize = msgbufp->msg_size; + mdhdr.msgbufsize = mbp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.ptesize = ptesize; mdhdr.kernbase = KERNBASE; mdhdr.paemode = pae_mode; mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_I386_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Physical memory: %ju MB\n", ptoa((uintmax_t)physmem) / 1048576); printf("Dumping %llu MB:", (long long)dumpsize >> 20); /* Dump my header */ bzero(&fakept, sizeof(fakept)); bcopy(&mdhdr, &fakept, sizeof(mdhdr)); error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ - error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size)); + error = blk_write(di, (char *)mbp->msg_ptr, 0, + round_page(mbp->msg_size)); if (error) goto fail; /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(fakept), "Large dump_avail not handled"); bzero(fakept, sizeof(fakept)); memcpy(fakept, dump_avail, sizeof(dump_avail)); error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ for (va = KERNBASE; va < kva_end; va += NBPDR) { /* We always write a page, even if it is zero */ pd = IdlePTD; /* always mapped! */ pde = pte_load(&pd[va >> PDRSHIFT]); if ((pde & (PG_PS | PG_V)) == (PG_PS | PG_V)) { /* This is a single 2M block. 
Generate a fake PTP */ pa = pde & PG_PS_FRAME; for (k = 0; k < NPTEPG; k++) { fakept[k] = (pa + (k * PAGE_SIZE)) | PG_V | PG_RW | PG_A | PG_M; } error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakept in the same block */ error = blk_flush(di); if (error) goto fail; continue; } if ((pde & PG_V) == PG_V) { pa = pde & PG_FRAME; error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } else { bzero(fakept, sizeof(fakept)); error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakept in the same block */ error = blk_flush(di); if (error) goto fail; } } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/mips/mips/minidump_machdep.c b/sys/mips/mips/minidump_machdep.c index 06a63834cab8..49a5ee3f15a7 100644 --- a/sys/mips/mips/minidump_machdep.c +++ b/sys/mips/mips/minidump_machdep.c @@ -1,279 +1,280 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/arm/arm/minidump_machdep.c v214223 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. 
 */
static uint64_t dumpsize;
/* Just auxiliary buffer */
static char tmpbuffer[PAGE_SIZE] __aligned(sizeof(uint64_t));

extern pd_entry_t *kernel_segmap;

static int
write_buffer(struct dumperinfo *di, char *ptr, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	maxdumpsz = di->maxiosize;
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;

	error = 0;

	while (sz) {
		len = min(maxdumpsz, sz);

		dumpsys_pb_progress(len);
		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_append(di, ptr, 0, len);
			if (error)
				return (error);
			ptr += len;
			sz -= len;
		} else {
			panic("pa is not supported");
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

int
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
{
	struct minidumphdr mdhdr;
+	struct msgbuf *mbp;
	uint64_t *dump_avail_buf;
	uint32_t ptesize;
	vm_paddr_t pa;
	vm_offset_t prev_pte = 0;
	uint32_t count = 0;
	vm_offset_t va;
	pt_entry_t *pte;
	int i, error;
	void *dump_va;

	/* Flush cache */
	mips_dcache_wbinv_all();

	/* Walk page table pages, set bits in vm_page_dump */
	ptesize = 0;
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) {
		ptesize += PAGE_SIZE;
		pte = pmap_pte(kernel_pmap, va);
		KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va));
		for (i = 0; i < NPTEPG; i++) {
			if (pte_test(&pte[i], PTE_V)) {
				pa = TLBLO_PTE_TO_PA(pte[i]);
				if (vm_phys_is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/*
	 * Now mark pages from 0 to phys_avail[0], that's where kernel
	 * and pages allocated by pmap_steal reside
	 */
	for (pa = 0; pa < phys_avail[0]; pa += PAGE_SIZE) {
		if (vm_phys_is_dumpable(pa))
			dump_add_page(pa);
	}

	/* Calculate dump size. */
+	mbp = state->msgbufp;
	dumpsize = ptesize;
-	dumpsize += round_page(msgbufp->msg_size);
+	dumpsize += round_page(mbp->msg_size);
	dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	VM_PAGE_DUMP_FOREACH(pa) {
		/* Clear out undumpable pages now if needed */
		if (vm_phys_is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			dump_drop_page(pa);
	}
	dumpsize += PAGE_SIZE;

	dumpsys_pb_init(dumpsize);

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
-	mdhdr.msgbufsize = msgbufp->msg_size;
+	mdhdr.msgbufsize = mbp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.ptesize = ptesize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error != 0)
		goto fail;

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump my header */
	bzero(tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, tmpbuffer, sizeof(mdhdr));
	error = write_buffer(di, tmpbuffer, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
-	error = write_buffer(di, (char *)msgbufp->msg_ptr,
-	    round_page(msgbufp->msg_size));
+	error = write_buffer(di, mbp->msg_ptr, round_page(mbp->msg_size));
	if (error)
		goto fail;

	/* Dump dump_avail. Make a copy using 64-bit physical addresses.
*/ _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <= sizeof(tmpbuffer), "Large dump_avail not handled"); bzero(tmpbuffer, sizeof(tmpbuffer)); if (sizeof(dump_avail[0]) != sizeof(uint64_t)) { dump_avail_buf = (uint64_t *)tmpbuffer; for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i++) { dump_avail_buf[i] = dump_avail[i]; dump_avail_buf[i + 1] = dump_avail[i + 1]; } } else { memcpy(tmpbuffer, dump_avail, sizeof(dump_avail)); } error = write_buffer(di, tmpbuffer, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = write_buffer(di, (char *)vm_page_dump, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) { pte = pmap_pte(kernel_pmap, va); KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va)); if (!count) { prev_pte = (vm_offset_t)pte; count++; } else { if ((vm_offset_t)pte == (prev_pte + count * PAGE_SIZE)) count++; else { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pte = (vm_offset_t)pte; } } } if (count) { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pte = 0; } /* Dump memory chunks page by page*/ VM_PAGE_DUMP_FOREACH(pa) { dump_va = pmap_kenter_temporary(pa, 0); error = write_buffer(di, dump_va, PAGE_SIZE); if (error) goto fail; pmap_kenter_temporary_free(pa); } error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/powerpc/powerpc/minidump_machdep.c b/sys/powerpc/powerpc/minidump_machdep.c index e6a0f3918883..c7e8d1965b98 100644 --- a/sys/powerpc/powerpc/minidump_machdep.c +++ b/sys/powerpc/powerpc/minidump_machdep.c @@ -1,338 +1,339 @@ /*- * Copyright (c) 2019 Leandro Lupori * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Debugging stuff */ #define MINIDUMP_DEBUG 0 #if MINIDUMP_DEBUG #define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__) #define DBG(...) __VA_ARGS__ static size_t total, dumptotal; static void dump_total(const char *id, size_t sz); #else #define dprintf(fmt, ...) #define DBG(...) #define dump_total(...) #endif extern vm_offset_t __startkernel, __endkernel; static int dump_retry_count = 5; SYSCTL_INT(_machdep, OID_AUTO, dump_retry_count, CTLFLAG_RWTUN, &dump_retry_count, 0, "Number of times dump has to retry before bailing out"); static struct kerneldumpheader kdh; static char pgbuf[PAGE_SIZE]; static size_t dumpsize; /* Handle chunked writes. */ static size_t fragsz; static void pmap_kenter_temporary(vm_offset_t va, vm_paddr_t pa) { pmap_kremove(va); pmap_kenter(va, pa); } static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, crashdumpmap, 0, fragsz); DBG(dumptotal += fragsz;) fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len, maxdumpsz; int error, i, c; maxdumpsz = MIN(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("Size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("Can't have both va and pa!\n"); return (EINVAL); } if ((pa % PAGE_SIZE) != 0) { printf("Address not page aligned 0x%lx\n", pa); return (EINVAL); } if (ptr != NULL) { /* * If we're doing a virtual dump, flush any pre-existing * pa pages */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); DBG(dumptotal += len;) ptr += len; } else { for (i = 0; i < len; i += PAGE_SIZE) pmap_kenter_temporary( (vm_offset_t)crashdumpmap + fragsz + i, pa + i); fragsz += len; pa += len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } sz -= len; /* Check for user abort. 
 */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

static int
dump_pmap(struct dumperinfo *di)
{
	void *ctx;
	char *buf;
	u_long nbytes;
	int error;

	ctx = dumpsys_dump_pmap_init(sizeof(pgbuf) / PAGE_SIZE);

	for (;;) {
		buf = dumpsys_dump_pmap(ctx, pgbuf, &nbytes);
		if (buf == NULL)
			break;
		error = blk_write(di, buf, 0, nbytes);
		if (error)
			return (error);
	}

	return (0);
}

int
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
{
	vm_paddr_t pa;
	int error, retry_count;
	uint32_t pmapsize;
	struct minidumphdr mdhdr;
+	struct msgbuf *mbp;

	retry_count = 0;
retry:
	retry_count++;
	fragsz = 0;

	DBG(total = dumptotal = 0;)

	/* Build set of dumpable pages from kernel pmap */
	pmapsize = dumpsys_scan_pmap();
	if (pmapsize % PAGE_SIZE != 0) {
		printf("pmapsize not page aligned: 0x%x\n", pmapsize);
		return (EINVAL);
	}

	/* Calculate dump size */
+	mbp = state->msgbufp;
	dumpsize = PAGE_SIZE; /* header */
-	dumpsize += round_page(msgbufp->msg_size);
+	dumpsize += round_page(mbp->msg_size);
	dumpsize += round_page(sizeof(dump_avail));
	dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
	dumpsize += pmapsize;
	VM_PAGE_DUMP_FOREACH(pa) {
		/* Clear out undumpable pages now if needed */
		if (vm_phys_is_dumpable(pa))
			dumpsize += PAGE_SIZE;
		else
			dump_drop_page(pa);
	}

	dumpsys_pb_init(dumpsize);

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	strncpy(mdhdr.mmu_name, pmap_mmu_name(), sizeof(mdhdr.mmu_name) - 1);
	mdhdr.version = MINIDUMP_VERSION;
-	mdhdr.msgbufsize = msgbufp->msg_size;
+	mdhdr.msgbufsize = mbp->msg_size;
	mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.kernend = VM_MAX_SAFE_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_BASE_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.hw_direct_map = hw_direct_map;
	mdhdr.startkernel = __startkernel;
	mdhdr.endkernel = __endkernel;
	mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_POWERPC_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error)
		goto fail;

	printf("Dumping %lu out of %ju MB:", dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump minidump header */
	bzero(pgbuf, sizeof(pgbuf));
	memcpy(pgbuf, &mdhdr, sizeof(mdhdr));
	error = blk_write(di, pgbuf, 0, PAGE_SIZE);
	if (error)
		goto fail;
	dump_total("header", PAGE_SIZE);

	/* Dump msgbuf up front */
-	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
-	    round_page(msgbufp->msg_size));
-	dump_total("msgbuf", round_page(msgbufp->msg_size));
+	error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size));
+	dump_total("msgbuf", round_page(mbp->msg_size));

	/* Dump dump_avail */
	_Static_assert(sizeof(dump_avail) <= sizeof(pgbuf),
	    "Large dump_avail not handled");
	bzero(pgbuf, sizeof(pgbuf));
	memcpy(pgbuf, dump_avail, sizeof(dump_avail));
	error = blk_write(di, pgbuf, 0, PAGE_SIZE);
	if (error)
		goto fail;
	dump_total("dump_avail", round_page(sizeof(dump_avail)));

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(BITSET_SIZE(vm_page_dump_pages)));
	if (error)
		goto fail;
	dump_total("bitmap", round_page(BITSET_SIZE(vm_page_dump_pages)));

	/* Dump kernel page directory pages */
	error = dump_pmap(di);
	if (error)
		goto fail;
	dump_total("pmap", pmapsize);

	/* Dump memory chunks */
	VM_PAGE_DUMP_FOREACH(pa) {
		error = blk_write(di, 0, pa, PAGE_SIZE);
		if (error)
			goto fail;
	}

	error = blk_flush(di);
	if (error)
		goto fail;
	dump_total("mem_chunks", dumpsize -
total); error = dump_finish(di, &kdh); if (error) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < dump_retry_count) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) printf("Dump failed. Partition too small.\n"); else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); } #if MINIDUMP_DEBUG static void dump_total(const char *id, size_t sz) { total += sz; dprintf("\n%s=%08lx/%08lx/%08lx\n", id, sz, total, dumptotal); } #endif diff --git a/sys/riscv/riscv/minidump_machdep.c b/sys/riscv/riscv/minidump_machdep.c index 8f5a4a4d1289..814c2dd00697 100644 --- a/sys/riscv/riscv/minidump_machdep.c +++ b/sys/riscv/riscv/minidump_machdep.c @@ -1,367 +1,368 @@ /*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * Copyright (c) 2019 Mitchell Horne * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static size_t dumpsize; static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)]; static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } /* * Write a block of data to the dump file. * * Caller can provide data through a pointer or by specifying its * physical address. * * XXX writes using pa should be no larger than PAGE_SIZE. 
*/ static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if ((((uintptr_t)pa) % PAGE_SIZE) != 0) { printf("address not page aligned %#lx\n", (uintptr_t)pa); return (EINVAL); } if (ptr != NULL) { /* * If we're doing a virtual dump, flush any * pre-existing pa pages. */ error = blk_flush(di); if (error != 0) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_append(di, ptr, 0, len); if (error != 0) return (error); ptr += len; sz -= len; } else { dump_va = (void *)PHYS_TO_DMAP(pa); fragsz += len; pa += len; sz -= len; error = blk_flush(di); if (error != 0) return (error); } /* Check for user abort */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } int cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) { pd_entry_t *l1, *l2, l2e; pt_entry_t *l3, l3e; struct minidumphdr mdhdr; + struct msgbuf *mbp; uint32_t pmapsize; vm_offset_t va, kva_max; vm_paddr_t pa; int error; int i; int retry_count; retry_count = 0; retry: retry_count++; error = 0; pmapsize = 0; /* Snapshot the KVA upper bound in case it grows. */ kva_max = kernel_vm_end; /* * Walk the kernel page table pages, setting the active entries in the * dump bitmap. * * NB: for a live dump, we may be racing with updates to the page * tables, so care must be taken to read each entry only once. 
*/ for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) { pmapsize += PAGE_SIZE; if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) continue; /* We should always be using the l2 table for kvm */ if (l2 == NULL) continue; /* l2 may be a superpage */ l2e = atomic_load_64(l2); if ((l2e & PTE_RWX) != 0) { pa = (l2e >> PTE_PPN1_S) << L2_SHIFT; for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) { if (vm_phys_is_dumpable(pa)) dump_add_page(pa); } } else { for (i = 0; i < Ln_ENTRIES; i++) { l3e = atomic_load_64(&l3[i]); if ((l3e & PTE_V) == 0) continue; pa = (l3e >> PTE_PPN0_S) * PAGE_SIZE; if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) dump_add_page(pa); } } } /* Calculate dump size */ + mbp = state->msgbufp; dumpsize = pmapsize; - dumpsize += round_page(msgbufp->msg_size); + dumpsize += round_page(mbp->msg_size); dumpsize += round_page(sizeof(dump_avail)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); } dumpsize += PAGE_SIZE; dumpsys_pb_init(dumpsize); /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; - mdhdr.msgbufsize = msgbufp->msg_size; + mdhdr.msgbufsize = mbp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.pmapsize = pmapsize; mdhdr.kernbase = KERNBASE; mdhdr.dmapphys = DMAP_MIN_PHYSADDR; mdhdr.dmapbase = DMAP_MIN_ADDRESS; mdhdr.dmapend = DMAP_MAX_ADDRESS; mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20, ptoa((uintmax_t)physmem) / 1048576); /* Dump minidump header */ bzero(&tmpbuffer, sizeof(tmpbuffer)); bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr)); error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ - error = blk_write(di, (char *)msgbufp->msg_ptr, 0, - round_page(msgbufp->msg_size)); + error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size)); if (error) goto fail; /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer), "Large dump_avail not handled"); bzero(tmpbuffer, sizeof(tmpbuffer)); memcpy(tmpbuffer, dump_avail, sizeof(dump_avail)); error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page directory pages */ bzero(&tmpbuffer, sizeof(tmpbuffer)); for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) { if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) { /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Flush, in case we reuse tmpbuffer in the same block */ error = blk_flush(di); if (error) goto fail; continue; } l2e = atomic_load_64(l2); if ((l2e & PTE_RWX) != 0) { /* Generate fake l3 entries based on the l2 superpage */ for (i = 0; i < Ln_ENTRIES; i++) { tmpbuffer[i] = (l2e | (i << PTE_PPN0_S)); } /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Flush, in case we reuse tmpbuffer in the same block */ error = blk_flush(di); if (error) goto fail; 
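	/*
	 * At this point tmpbuffer holds Ln_ENTRIES synthesized L3 PTEs
	 * covering the 2MB superpage just written out; re-zero it so later
	 * iterations can reuse it as the all-zero fallback page.
	 */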
bzero(&tmpbuffer, sizeof(tmpbuffer)); } else { pa = (l2e >> PTE_PPN0_S) * PAGE_SIZE; /* * We always write a page, even if it is zero. If pa * is malformed, write the zeroed tmpbuffer. */ if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) error = blk_write(di, NULL, pa, PAGE_SIZE); else error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; } } /* Dump memory chunks */ /* XXX cluster it up and use blk_dump() */ VM_PAGE_DUMP_FOREACH(pa) { error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < 5) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) { printf("Dump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); }
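Note on the pattern: every hunk above makes the same mechanical substitution. Each port's cpu_minidumpsys() now reaches the message buffer through the minidumpstate argument (mbp = state->msgbufp) rather than dereferencing the msgbufp global, so the dump consumes whatever snapshot the dump framework hands it. The following is a minimal standalone sketch of that hand-off, assuming a reduced struct minidumpstate carrying only the msgbufp member visible in this diff (the real definition lives in the kernel dump headers and may carry more state), with PAGE_SIZE and round_page as stand-ins for the kernel macros:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Simplified stand-in for the kernel's struct msgbuf. */
struct msgbuf {
	char		*msg_ptr;	/* region handed to blk_write() */
	unsigned int	 msg_size;	/* bytes, not yet page-rounded */
};

/* Assumed shape: the diff only shows that the state carries msgbufp. */
struct minidumpstate {
	struct msgbuf	*msgbufp;
};

/* Mirrors the per-port "dumpsize += round_page(mbp->msg_size)" step. */
static size_t
msgbuf_dump_bytes(const struct minidumpstate *state)
{
	struct msgbuf *mbp = state->msgbufp;

	return (round_page((size_t)mbp->msg_size));
}

int
main(void)
{
	static char buf[600];
	struct msgbuf mb = { buf, sizeof(buf) };
	struct minidumpstate st = { &mb };

	/* 600 bytes of msgbuf round up to one 4096-byte dump page. */
	printf("%zu\n", msgbuf_dump_bytes(&st));
	return (0);
}

Threading per-dump inputs through an explicit state argument instead of globals is what makes a live-dump path practical: each dump can be handed a stable snapshot while the running system keeps writing into the real msgbuf.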