diff --git a/lib/libkvm/kvm_aarch64.h b/lib/libkvm/kvm_aarch64.h index fbbd869ab425..fdbdafde025b 100644 --- a/lib/libkvm/kvm_aarch64.h +++ b/lib/libkvm/kvm_aarch64.h @@ -1,66 +1,65 @@ /*- * Copyright (c) 2015 John H. Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __KVM_AARCH64_H__ #define __KVM_AARCH64_H__ #ifdef __aarch64__ #include <machine/pte.h> #endif typedef uint64_t aarch64_physaddr_t; typedef uint64_t aarch64_pte_t; -#define AARCH64_PAGE_SHIFT 12 -#define AARCH64_PAGE_SIZE (1 << AARCH64_PAGE_SHIFT) -#define AARCH64_PAGE_MASK (AARCH64_PAGE_SIZE - 1) +#define AARCH64_PAGE_SHIFT_4K 12 +#define AARCH64_PAGE_SIZE_4K (1 << AARCH64_PAGE_SHIFT_4K) + +#define AARCH64_PAGE_SHIFT_16K 14 +#define AARCH64_PAGE_SIZE_16K (1 << AARCH64_PAGE_SHIFT_16K) /* Source: arm64/include/pte.h */ #define AARCH64_ATTR_MASK 0xfffc000000000fff #define AARCH64_ATTR_UXN (1ULL << 54) #define AARCH64_ATTR_PXN (1ULL << 53) #define AARCH64_ATTR_XN (AARCH64_ATTR_PXN | AARCH64_ATTR_UXN) #define AARCH64_ATTR_AP(x) ((x) << 6) #define AARCH64_ATTR_AP_RO (1 << 1) #define AARCH64_ATTR_DESCR_MASK 3 -#define AARCH64_L3_SHIFT 12 +#define AARCH64_L3_SHIFT_4K 12 +#define AARCH64_L3_SHIFT_16K 14 #define AARCH64_L3_PAGE 0x3 #ifdef __aarch64__ -_Static_assert(PAGE_SHIFT == AARCH64_PAGE_SHIFT, "PAGE_SHIFT mismatch"); -_Static_assert(PAGE_SIZE == AARCH64_PAGE_SIZE, "PAGE_SIZE mismatch"); -_Static_assert(PAGE_MASK == AARCH64_PAGE_MASK, "PAGE_MASK mismatch"); _Static_assert(ATTR_MASK == AARCH64_ATTR_MASK, "ATTR_MASK mismatch"); _Static_assert(ATTR_DESCR_MASK == AARCH64_ATTR_DESCR_MASK, "ATTR_DESCR_MASK mismatch"); -_Static_assert(L3_SHIFT == AARCH64_L3_SHIFT, "L3_SHIFT mismatch"); _Static_assert(L3_PAGE == AARCH64_L3_PAGE, "L3_PAGE mismatch"); #endif #endif /* !__KVM_AARCH64_H__ */ diff --git a/lib/libkvm/kvm_minidump_aarch64.c b/lib/libkvm/kvm_minidump_aarch64.c index 7fd4219fbf21..5e9ac739406f 100644 --- a/lib/libkvm/kvm_minidump_aarch64.c +++ b/lib/libkvm/kvm_minidump_aarch64.c @@ -1,294 +1,325 @@ /*- * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799 */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); /* * ARM64 (AArch64) machine dependent routines for kvm and minidumps. 
*/ #include #include #include #include #include #include #include #include "../../sys/arm64/include/minidump.h" #include #include "kvm_private.h" #include "kvm_aarch64.h" -#define aarch64_round_page(x) roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE) +#define aarch64_round_page(x, size) roundup2((kvaddr_t)(x), size) +#define aarch64_trunc_page(x, size) rounddown2((kvaddr_t)(x), size) struct vmstate { struct minidumphdr hdr; + size_t page_size; + u_int l3_shift; }; static aarch64_pte_t _aarch64_pte_get(kvm_t *kd, u_long pteindex) { aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le64toh(*pte); } static int _aarch64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) && _kvm_is_minidump(kd)); } static void _aarch64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _aarch64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); - if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { + if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); - vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ? - le32toh(vmst->hdr.dumpavailsize) : 0; + /* dumpavailsize added in version 2 */ + if (vmst->hdr.version >= 2) { + vmst->hdr.dumpavailsize = le32toh(vmst->hdr.dumpavailsize); + } else { + vmst->hdr.dumpavailsize = 0; + } + /* flags added in version 3 */ + if (vmst->hdr.version >= 3) { + vmst->hdr.flags = le32toh(vmst->hdr.flags); + } else { + vmst->hdr.flags = MINIDUMP_FLAG_PS_4K; + } + + switch (vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK) { + case MINIDUMP_FLAG_PS_4K: + vmst->page_size = AARCH64_PAGE_SIZE_4K; + vmst->l3_shift = AARCH64_L3_SHIFT_4K; + break; + case MINIDUMP_FLAG_PS_16K: + vmst->page_size = AARCH64_PAGE_SIZE_16K; + vmst->l3_shift = AARCH64_L3_SHIFT_16K; + break; + default: + _kvm_err(kd, kd->program, "unknown page size flag %x", + vmst->hdr.flags & MINIDUMP_FLAG_PS_MASK); + return (-1); + } /* Skip header and msgbuf */ - dump_avail_off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize); + dump_avail_off = vmst->page_size + + aarch64_round_page(vmst->hdr.msgbufsize, vmst->page_size); /* Skip dump_avail */ - off = dump_avail_off + aarch64_round_page(vmst->hdr.dumpavailsize); + off = dump_avail_off + + aarch64_round_page(vmst->hdr.dumpavailsize, vmst->page_size); /* build physical address lookup table for sparse pages */ - sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) + - aarch64_round_page(vmst->hdr.pmapsize); + sparse_off = off + + aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size) + + aarch64_round_page(vmst->hdr.pmapsize, 
vmst->page_size); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, vmst->page_size) == -1) { return (-1); } - off += aarch64_round_page(vmst->hdr.bitmapsize); + off += aarch64_round_page(vmst->hdr.bitmapsize, vmst->page_size); if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) { return (-1); } - off += aarch64_round_page(vmst->hdr.pmapsize); + off += aarch64_round_page(vmst->hdr.pmapsize, vmst->page_size); return (0); } static int _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; aarch64_physaddr_t offset; aarch64_pte_t l3; kvaddr_t l3_index; aarch64_physaddr_t a; off_t ofs; vm = kd->vmst; - offset = va & AARCH64_PAGE_MASK; + offset = va & (kd->vmst->page_size - 1); if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { - a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & - ~AARCH64_PAGE_MASK; - ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE); + a = aarch64_trunc_page(va - vm->hdr.dmapbase + vm->hdr.dmapphys, + kd->vmst->page_size); + ofs = _kvm_pt_find(kd, a, kd->vmst->page_size); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; - return (AARCH64_PAGE_SIZE - offset); + return (kd->vmst->page_size - offset); } else if (va >= vm->hdr.kernbase) { - l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT; + l3_index = (va - vm->hdr.kernbase) >> kd->vmst->l3_shift; if (l3_index >= vm->hdr.pmapsize / sizeof(l3)) goto invalid; l3 = _aarch64_pte_get(kd, l3_index); if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: pde not valid"); goto invalid; } a = l3 & ~AARCH64_ATTR_MASK; - ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE); + ofs = _kvm_pt_find(kd, a, kd->vmst->page_size); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "physical 
address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (AARCH64_PAGE_SIZE - offset); + return (kd->vmst->page_size - offset); } else { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_aarch64_minidump_kvatop called in live kernel!"); return (0); } return (_aarch64_minidump_vatop(kd, va, pa)); } static int _aarch64_native(kvm_t *kd __unused) { #ifdef __aarch64__ return (1); #else return (0); #endif } static vm_prot_t _aarch64_entry_to_prot(aarch64_pte_t pte) { vm_prot_t prot = VM_PROT_READ; /* Source: arm64/arm64/pmap.c:pmap_protect() */ if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0) prot |= VM_PROT_WRITE; if ((pte & AARCH64_ATTR_XN) == 0) prot |= VM_PROT_EXECUTE; return prot; } static int _aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct vmstate *vm = kd->vmst; u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t); u_long bmindex, dva, pa, pteindex, va; struct kvm_bitmap bm; vm_prot_t prot; int ret = 0; if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex)) return (0); for (pteindex = 0; pteindex < nptes; pteindex++) { aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex); if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) continue; - va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT); + va = vm->hdr.kernbase + (pteindex << kd->vmst->l3_shift); pa = pte & ~AARCH64_ATTR_MASK; dva = vm->hdr.dmapbase + pa; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, - _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) { + _aarch64_entry_to_prot(pte), kd->vmst->page_size, 0)) { goto out; } } while (_kvm_bitmap_next(&bm, &bmindex)) { - pa = _kvm_bit_id_pa(kd, bmindex, AARCH64_PAGE_SIZE); + pa = _kvm_bit_id_pa(kd, bmindex, 
kd->vmst->page_size); if (pa == _KVM_PA_INVALID) break; dva = vm->hdr.dmapbase + pa; - if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE)) + if (vm->hdr.dmapend < (dva + kd->vmst->page_size)) break; va = 0; prot = VM_PROT_READ | VM_PROT_WRITE; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, - prot, AARCH64_PAGE_SIZE, 0)) { + prot, kd->vmst->page_size, 0)) { goto out; } } ret = 1; out: _kvm_bitmap_deinit(&bm); return (ret); } static struct kvm_arch kvm_aarch64_minidump = { .ka_probe = _aarch64_minidump_probe, .ka_initvtop = _aarch64_minidump_initvtop, .ka_freevtop = _aarch64_minidump_freevtop, .ka_kvatop = _aarch64_minidump_kvatop, .ka_native = _aarch64_native, .ka_walk_pages = _aarch64_minidump_walk_pages, }; KVM_ARCH(kvm_aarch64_minidump); diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c index ac5a7b271b85..e05a19fc1c41 100644 --- a/sys/arm64/arm64/minidump_machdep.c +++ b/sys/arm64/arm64/minidump_machdep.c @@ -1,394 +1,395 @@ /*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static size_t dumpsize; static uint64_t tmpbuffer[Ln_ENTRIES]; static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if ((((uintptr_t)pa) % PAGE_SIZE) != 0) { printf("address not page aligned %p\n", ptr); return (EINVAL); } if (ptr != NULL) { /* * If we're doing a virtual dump, flush any * pre-existing pa pages. 
*/ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; dumpsys_pb_progress(len); wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { dump_va = (void *)PHYS_TO_DMAP(pa); fragsz += len; pa += len; sz -= len; error = blk_flush(di); if (error) return (error); } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } int cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) { struct minidumphdr mdhdr; struct msgbuf *mbp; pd_entry_t *l0, *l1, l1e, *l2, l2e; pt_entry_t *l3, l3e; vm_offset_t va, kva_end; vm_paddr_t pa; uint32_t pmapsize; int error, i, j, retry_count; retry_count = 0; retry: retry_count++; error = 0; pmapsize = 0; /* Snapshot the KVA upper bound in case it grows. */ kva_end = kernel_vm_end; /* * Walk the kernel page table pages, setting the active entries in the * dump bitmap. * * NB: for a live dump, we may be racing with updates to the page * tables, so care must be taken to read each entry only once. 
*/ for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) { pmapsize += PAGE_SIZE; if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) continue; l1e = atomic_load_64(l1); l2e = atomic_load_64(l2); if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { pa = l1e & ~ATTR_MASK; for (i = 0; i < Ln_ENTRIES * Ln_ENTRIES; i++, pa += PAGE_SIZE) if (vm_phys_is_dumpable(pa)) vm_page_dump_add(state->dump_bitset, pa); pmapsize += (Ln_ENTRIES - 1) * PAGE_SIZE; va += L1_SIZE - L2_SIZE; } else if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) { pa = l2e & ~ATTR_MASK; for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) { if (vm_phys_is_dumpable(pa)) vm_page_dump_add(state->dump_bitset, pa); } } else if ((l2e & ATTR_DESCR_MASK) == L2_TABLE) { for (i = 0; i < Ln_ENTRIES; i++) { l3e = atomic_load_64(&l3[i]); if ((l3e & ATTR_DESCR_MASK) != L3_PAGE) continue; pa = l3e & ~ATTR_MASK; if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) vm_page_dump_add(state->dump_bitset, pa); } } } /* Calculate dump size. */ mbp = state->msgbufp; dumpsize = pmapsize; dumpsize += round_page(mbp->msg_size); dumpsize += round_page(sizeof(dump_avail)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) { if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) dumpsize += PAGE_SIZE; else vm_page_dump_drop(state->dump_bitset, pa); } dumpsize += PAGE_SIZE; dumpsys_pb_init(dumpsize); /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = mbp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.pmapsize = pmapsize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; mdhdr.dmapphys = DMAP_MIN_PHYSADDR; mdhdr.dmapbase = DMAP_MIN_ADDRESS; mdhdr.dmapend = DMAP_MAX_ADDRESS; mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); + mdhdr.flags = MINIDUMP_FLAG_PS_4K; dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION, dumpsize); error = dump_start(di, &kdh); if (error 
!= 0) goto fail; printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20, ptoa((uintmax_t)physmem) / 1048576); /* Dump my header */ bzero(&tmpbuffer, sizeof(tmpbuffer)); bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr)); error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = blk_write(di, mbp->msg_ptr, 0, round_page(mbp->msg_size)); if (error) goto fail; /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer), "Large dump_avail not handled"); bzero(tmpbuffer, sizeof(tmpbuffer)); memcpy(tmpbuffer, dump_avail, sizeof(dump_avail)); error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)state->dump_bitset, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page directory pages */ bzero(&tmpbuffer, sizeof(tmpbuffer)); for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) { if (!pmap_get_tables(pmap_kernel(), va, &l0, &l1, &l2, &l3)) { /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; continue; } l1e = atomic_load_64(l1); l2e = atomic_load_64(l2); if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK) { /* * Handle a 1GB block mapping: write out 512 fake L2 * pages. 
*/ pa = (l1e & ~ATTR_MASK) | (va & L1_OFFSET); for (i = 0; i < Ln_ENTRIES; i++) { for (j = 0; j < Ln_ENTRIES; j++) { tmpbuffer[j] = (pa + i * L2_SIZE + j * PAGE_SIZE) | ATTR_DEFAULT | L3_PAGE; } error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; } /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); va += L1_SIZE - L2_SIZE; } else if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) { pa = (l2e & ~ATTR_MASK) | (va & L2_OFFSET); /* Generate fake l3 entries based upon the l1 entry */ for (i = 0; i < Ln_ENTRIES; i++) { tmpbuffer[i] = (pa + i * PAGE_SIZE) | ATTR_DEFAULT | L3_PAGE; } error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakepd in the same block */ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); continue; } else { pa = l2e & ~ATTR_MASK; /* * We always write a page, even if it is zero. If pa * is malformed, write the zeroed tmpbuffer. */ if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) error = blk_write(di, NULL, pa, PAGE_SIZE); else error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; } } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) { error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < 5) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) { printf("Dump failed. 
Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); } diff --git a/sys/arm64/include/minidump.h b/sys/arm64/include/minidump.h index 87aaffc5ec87..c27d2c71bc12 100644 --- a/sys/arm64/include/minidump.h +++ b/sys/arm64/include/minidump.h @@ -1,49 +1,54 @@ /*- * Copyright (c) 2006 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * From i386: FreeBSD: 157909 2006-04-21 04:28:43Z peter * $FreeBSD$ */ #ifndef _MACHINE_MINIDUMP_H_ #define _MACHINE_MINIDUMP_H_ 1 #define MINIDUMP_MAGIC "minidump FreeBSD/arm64" -#define MINIDUMP_VERSION 2 +#define MINIDUMP_VERSION 3 struct minidumphdr { char magic[24]; uint32_t version; uint32_t msgbufsize; uint32_t bitmapsize; uint32_t pmapsize; uint64_t kernbase; uint64_t dmapphys; uint64_t dmapbase; uint64_t dmapend; uint32_t dumpavailsize; +#define MINIDUMP_FLAG_PS_MASK (3 << 0) +#define MINIDUMP_FLAG_PS_4K (0 << 0) +#define MINIDUMP_FLAG_PS_16K (1 << 0) +/* MINIDUMP_FLAG_PS_64K (2 << 0) */ + uint32_t flags; }; #endif /* _MACHINE_MINIDUMP_H_ */