Index: head/lib/libkvm/kvm_minidump_aarch64.c =================================================================== --- head/lib/libkvm/kvm_minidump_aarch64.c (revision 368306) +++ head/lib/libkvm/kvm_minidump_aarch64.c (revision 368307) @@ -1,295 +1,294 @@ /*- * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799 */ #include __FBSDID("$FreeBSD$"); /* * ARM64 (AArch64) machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include "../../sys/arm64/include/minidump.h" #include #include "kvm_private.h" #include "kvm_aarch64.h" #define aarch64_round_page(x) roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; }; static aarch64_pte_t _aarch64_pte_get(kvm_t *kd, u_long pteindex) { aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le64toh(*pte); } static int _aarch64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) && _kvm_is_minidump(kd)); } static void _aarch64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _aarch64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ? le32toh(vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + aarch64_round_page(vmst->hdr.dumpavailsize); /* build physical address lookup table for sparse pages */ sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) + aarch64_round_page(vmst->hdr.pmapsize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE, - sizeof(uint64_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE) == -1) { return (-1); } off += aarch64_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) { return (-1); } off += aarch64_round_page(vmst->hdr.pmapsize); return (0); } static int _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; aarch64_physaddr_t offset; aarch64_pte_t l3; kvaddr_t l3_index; aarch64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AARCH64_PAGE_MASK; if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & ~AARCH64_PAGE_MASK; ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AARCH64_PAGE_SIZE - offset); } else if (va >= vm->hdr.kernbase) { l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT; if (l3_index >= vm->hdr.pmapsize / sizeof(l3)) goto invalid; l3 = _aarch64_pte_get(kd, l3_index); if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: pde not valid"); goto invalid; } a = l3 & ~AARCH64_ATTR_MASK; ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AARCH64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_aarch64_minidump_kvatop called in live kernel!"); return (0); } return (_aarch64_minidump_vatop(kd, va, pa)); } static int _aarch64_native(kvm_t *kd __unused) { #ifdef __aarch64__ return (1); #else return (0); #endif } static vm_prot_t _aarch64_entry_to_prot(aarch64_pte_t pte) { vm_prot_t prot = VM_PROT_READ; /* Source: arm64/arm64/pmap.c:pmap_protect() */ if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0) prot |= VM_PROT_WRITE; if ((pte & AARCH64_ATTR_XN) == 0) prot |= VM_PROT_EXECUTE; return prot; } static int _aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct vmstate *vm = kd->vmst; u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t); u_long 
bmindex, dva, pa, pteindex, va; struct kvm_bitmap bm; vm_prot_t prot; int ret = 0; if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex)) return (0); for (pteindex = 0; pteindex < nptes; pteindex++) { aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex); if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) continue; va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT); pa = pte & ~AARCH64_ATTR_MASK; dva = vm->hdr.dmapbase + pa; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) { goto out; } } while (_kvm_bitmap_next(&bm, &bmindex)) { pa = _kvm_bit_id_pa(kd, bmindex, AARCH64_PAGE_SIZE); if (pa == _KVM_PA_INVALID) break; dva = vm->hdr.dmapbase + pa; if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE)) break; va = 0; prot = VM_PROT_READ | VM_PROT_WRITE; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, AARCH64_PAGE_SIZE, 0)) { goto out; } } ret = 1; out: _kvm_bitmap_deinit(&bm); return (ret); } static struct kvm_arch kvm_aarch64_minidump = { .ka_probe = _aarch64_minidump_probe, .ka_initvtop = _aarch64_minidump_initvtop, .ka_freevtop = _aarch64_minidump_freevtop, .ka_kvatop = _aarch64_minidump_kvatop, .ka_native = _aarch64_native, .ka_walk_pages = _aarch64_minidump_walk_pages, }; KVM_ARCH(kvm_aarch64_minidump);
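The direct-map case of _aarch64_minidump_vatop() above needs no page-table walk: the dump header supplies dmapbase/dmapend/dmapphys, and the translation is plain arithmetic before the page is located with _kvm_pt_find(). A minimal standalone sketch of just that arithmetic, with illustrative stand-in header values (this is not libkvm code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (PAGE_SIZE - 1)

/* Illustrative stand-ins for the minidumphdr fields used above. */
static const uint64_t dmapbase = 0xffff000000000000ULL;
static const uint64_t dmapend  = 0xffff000100000000ULL;
static const uint64_t dmapphys = 0x0000000080000000ULL;

/* Direct-map case of _aarch64_minidump_vatop(), minus the dump lookup. */
static int
dmap_va_to_pa(uint64_t va, uint64_t *pa)
{
	if (va < dmapbase || va >= dmapend)
		return (-1);
	*pa = ((va - dmapbase + dmapphys) & ~PAGE_MASK) | (va & PAGE_MASK);
	return (0);
}

int
main(void)
{
	uint64_t pa;

	if (dmap_va_to_pa(dmapbase + 0x1234, &pa) == 0)
		printf("pa=0x%jx\n", (uintmax_t)pa);	/* prints 0x80001234 */
	return (0);
}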
Index: head/lib/libkvm/kvm_minidump_amd64.c
===================================================================
--- head/lib/libkvm/kvm_minidump_amd64.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_amd64.c (revision 368307)
@@ -1,443 +1,442 @@
/*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AMD64 machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include #include "../../sys/amd64/include/minidump.h" #include #include "kvm_private.h" #include "kvm_amd64.h" #define amd64_round_page(x) roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE) #define VM_IS_V1(vm) (vm->hdr.version == 1) #define VA_OFF(vm, va) \ (VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK)) struct vmstate { struct minidumphdr hdr; }; static vm_prot_t _amd64_entry_to_prot(uint64_t entry) { vm_prot_t prot = VM_PROT_READ; if ((entry & AMD64_PG_RW) != 0) prot |= VM_PROT_WRITE; if ((entry & AMD64_PG_NX) == 0) prot |= VM_PROT_EXECUTE; return prot; } /* * Version 2 minidumps use page directory entries, while version 1 use page * table entries. */ static amd64_pde_t _amd64_pde_get(kvm_t *kd, u_long pdeindex) { amd64_pde_t *pde = _kvm_pmap_get(kd, pdeindex, sizeof(*pde)); return le64toh(*pde); } static amd64_pte_t _amd64_pte_get(kvm_t *kd, u_long pteindex) { amd64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le64toh(*pte); } /* Get the first page table entry for a given page directory index. */ static amd64_pte_t * _amd64_pde_first_pte(kvm_t *kd, u_long pdeindex) { u_long *pa; pa = _kvm_pmap_get(kd, pdeindex, sizeof(amd64_pde_t)); if (pa == NULL) return NULL; return _kvm_map_get(kd, *pa & AMD64_PG_FRAME, AMD64_PAGE_SIZE); } static int _amd64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && _kvm_is_minidump(kd)); } static void _amd64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _amd64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } /* * NB: amd64 minidump header is binary compatible between version 1 * and version 2; version 3 adds the dumpavailsize field */ vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) { _kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
le32toh(vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + amd64_round_page(vmst->hdr.dumpavailsize); sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) + amd64_round_page(vmst->hdr.pmapsize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE, - sizeof(uint64_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE) == -1) { return (-1); } off += amd64_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) { return (-1); } off += amd64_round_page(vmst->hdr.pmapsize); return (0); } static int _amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; amd64_physaddr_t offset; amd64_pte_t pte; kvaddr_t pteindex; amd64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT; if (pteindex >= vm->hdr.pmapsize / sizeof(pte)) goto invalid; pte = _amd64_pte_get(kd, pteindex); if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: pte not valid"); goto invalid; } a = pte & AMD64_PG_FRAME; ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { amd64_pte_t pt[AMD64_NPTEPG]; struct vmstate *vm; amd64_physaddr_t offset; amd64_pde_t pde; amd64_pte_t pte; kvaddr_t pteindex; kvaddr_t pdeindex; amd64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT; if (pdeindex >= vm->hdr.pmapsize / sizeof(pde)) goto invalid; pde = _amd64_pde_get(kd, pdeindex); if ((pde & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: pde not valid"); goto invalid; } if ((pde & AMD64_PG_PS) == 0) { a = pde & AMD64_PG_FRAME; /* TODO: Just read the single PTE */ ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "cannot find page table entry for %ju", (uintmax_t)a); goto invalid; } if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) != AMD64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read page table entry for %ju", (uintmax_t)a); goto invalid; } pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1); pte = le64toh(pt[pteindex]); if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: pte not valid"); goto invalid; } a = pte & AMD64_PG_FRAME; } else { a = pde & AMD64_PG_PS_FRAME; a += (va & AMD64_PDRMASK) ^ offset; } ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: physical address 0x%jx not 
in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_amd64_minidump_kvatop called in live kernel!"); return (0); } if (((struct vmstate *)kd->vmst)->hdr.version == 1) return (_amd64_minidump_vatop_v1(kd, va, pa)); else return (_amd64_minidump_vatop(kd, va, pa)); } static int _amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct vmstate *vm = kd->vmst; u_long npdes = vm->hdr.pmapsize / sizeof(amd64_pde_t); u_long bmindex, dva, pa, pdeindex, va; struct kvm_bitmap bm; int ret = 0; vm_prot_t prot; unsigned int pgsz = AMD64_PAGE_SIZE; if (vm->hdr.version < 2) return (0); if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex)) return (0); for (pdeindex = 0; pdeindex < npdes; pdeindex++) { amd64_pde_t pde = _amd64_pde_get(kd, pdeindex); amd64_pte_t *ptes; u_long i; va = vm->hdr.kernbase + (pdeindex << AMD64_PDRSHIFT); if ((pde & AMD64_PG_V) == 0) continue; if ((pde & AMD64_PG_PS) != 0) { /* * Large page. Iterate on each 4K page section * within this page. This differs from 4K pages in * that every page here uses the same PDE to * generate permissions. */ pa = (pde & AMD64_PG_PS_FRAME) + ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va)); dva = vm->hdr.dmapbase + pa; _kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE)); if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) { goto out; } continue; } /* 4K pages: pde references another page of entries. */ ptes = _amd64_pde_first_pte(kd, pdeindex); /* Ignore page directory pages that were not dumped. */ if (ptes == NULL) continue; for (i = 0; i < AMD64_NPTEPG; i++) { amd64_pte_t pte = (u_long)ptes[i]; pa = pte & AMD64_PG_FRAME; dva = vm->hdr.dmapbase + pa; if ((pte & AMD64_PG_V) != 0) { _kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE)); if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, _amd64_entry_to_prot(pte), pgsz, 0)) { goto out; } } va += AMD64_PAGE_SIZE; } } while (_kvm_bitmap_next(&bm, &bmindex)) { pa = _kvm_bit_id_pa(kd, bmindex, AMD64_PAGE_SIZE); if (pa == _KVM_PA_INVALID) break; dva = vm->hdr.dmapbase + pa; if (vm->hdr.dmapend < (dva + pgsz)) break; va = 0; /* amd64/pmap.c: create_pagetables(): dmap always R|W. 
*/ prot = VM_PROT_READ | VM_PROT_WRITE; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, pgsz, 0)) { goto out; } } ret = 1; out: _kvm_bitmap_deinit(&bm); return (ret); } static struct kvm_arch kvm_amd64_minidump = { .ka_probe = _amd64_minidump_probe, .ka_initvtop = _amd64_minidump_initvtop, .ka_freevtop = _amd64_minidump_freevtop, .ka_kvatop = _amd64_minidump_kvatop, .ka_native = _amd64_native, .ka_walk_pages = _amd64_minidump_walk_pages, }; KVM_ARCH(kvm_amd64_minidump);
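In the 2MB large-page case above, _amd64_minidump_vatop() computes the backing 4K frame as (pde & AMD64_PG_PS_FRAME) + ((va & AMD64_PDRMASK) ^ offset): xoring away the low 12 bits leaves the 4K-aligned displacement of va's page within the 2MB mapping. A self-contained sketch of that index math, with the masks spelled out as assumed illustrative constants (not the kvm_amd64.h macros):

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK 0xfffULL	/* low 12 bits: offset within a 4K page */
#define PDRMASK 0x1fffffULL	/* low 21 bits: offset within a 2MB page */

/* 4K frame backing va inside a 2MB page whose PG_PS frame is ps_frame. */
static uint64_t
large_page_frame(uint64_t ps_frame, uint64_t va)
{
	uint64_t offset = va & PAGE_MASK;

	return (ps_frame + ((va & PDRMASK) ^ offset));
}

int
main(void)
{
	/* Illustrative: 2MB page at physical 0x40000000, va 0x34567 in. */
	assert(large_page_frame(0x40000000ULL, 0xffffffff80234567ULL) ==
	    0x40000000ULL + 0x34000ULL);
	return (0);
}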
Index: head/lib/libkvm/kvm_minidump_arm.c
===================================================================
--- head/lib/libkvm/kvm_minidump_arm.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_arm.c (revision 368307)
@@ -1,279 +1,278 @@
/*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_i386.c,v 1.2 2006/06/05 08:51:14 */ #include __FBSDID("$FreeBSD$"); /* * ARM machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include #include #include "../../sys/arm/include/minidump.h" #include "kvm_private.h" #include "kvm_arm.h" #define arm_round_page(x) roundup2((kvaddr_t)(x), ARM_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; unsigned char ei_data; }; static arm_pt_entry_t _arm_pte_get(kvm_t *kd, u_long pteindex) { arm_pt_entry_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return _kvm32toh(kd, *pte); } static int _arm_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) && _kvm_is_minidump(kd)); } static void _arm_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _arm_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase); vmst->hdr.arch = _kvm32toh(kd, vmst->hdr.arch); vmst->hdr.mmuformat = _kvm32toh(kd, vmst->hdr.mmuformat); if (vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_UNKNOWN) { /* This is a safe default as 1K pages are not used. */ vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6; } vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
_kvm32toh(kd, vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + arm_round_page(vmst->hdr.dumpavailsize); sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) + arm_round_page(vmst->hdr.ptesize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE, - sizeof(uint32_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE) == -1) { return (-1); } off += arm_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) { return (-1); } off += arm_round_page(vmst->hdr.ptesize); return (0); } static int _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; arm_pt_entry_t pte; arm_physaddr_t offset, a; kvaddr_t pteindex; off_t ofs; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!"); return (0); } vm = kd->vmst; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(pte)) goto invalid; pte = _arm_pte_get(kd, pteindex); if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not valid"); goto invalid; } if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { /* 64K page -> convert to be like 4K page */ offset = va & ARM_L2_S_OFFSET; a = (pte & ARM_L2_L_FRAME) + (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME); } else { if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 && (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not supported"); goto invalid; } /* 4K page */ offset = va & ARM_L2_S_OFFSET; a = pte & ARM_L2_S_FRAME; } ofs = _kvm_pt_find(kd, a, ARM_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (ARM_PAGE_SIZE - offset); } else _kvm_err(kd, kd->program, "_arm_minidump_kvatop: virtual " "address 0x%jx not minidumped", (uintmax_t)va); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static vm_prot_t _arm_entry_to_prot(kvm_t *kd, arm_pt_entry_t pte) { struct vmstate *vm = kd->vmst; vm_prot_t prot = VM_PROT_READ; /* Source: arm/arm/pmap-v4.c:pmap_fault_fixup() */ if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4) { if (pte & ARM_L2_S_PROT_W) prot |= VM_PROT_WRITE; return prot; } /* Source: arm/arm/pmap-v6.c:pmap_protect() */ if ((pte & ARM_PTE2_RO) == 0) prot |= VM_PROT_WRITE; if ((pte & ARM_PTE2_NX) == 0) prot |= VM_PROT_EXECUTE; return prot; } static int _arm_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct vmstate *vm = kd->vmst; u_long nptes = vm->hdr.ptesize / sizeof(arm_pt_entry_t); u_long dva, pa, pteindex, va; for (pteindex = 0; pteindex < nptes; pteindex++) { arm_pt_entry_t pte = _arm_pte_get(kd, pteindex); if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) continue; va = vm->hdr.kernbase + (pteindex << ARM_PAGE_SHIFT); if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { /* 64K page */ pa = (pte & ARM_L2_L_FRAME) + (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME); } else { if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 && (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) { continue; } /* 4K page */ pa = pte & ARM_L2_S_FRAME; } dva = 0; /* no direct map on this platform */ if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, _arm_entry_to_prot(kd, pte), ARM_PAGE_SIZE, 0)) return (0); } return (1); } 
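Both _arm_minidump_kvatop() and _arm_minidump_walk_pages() above reduce a 64K L2 large page to the 4K page containing the address: the 64K frame taken from the PTE, plus the 4K-aligned part of the 64K page offset. A small standalone illustration; the mask values are written out here as assumptions matching common ARM definitions, not the kvm_arm.h macros:

#include <assert.h>
#include <stdint.h>

#define L2_L_FRAME  0xffff0000u	/* 64K frame bits */
#define L2_L_OFFSET 0x0000ffffu	/* offset within a 64K page */
#define L2_S_FRAME  0xfffff000u	/* 4K frame bits */

/* 64K page -> the 4K page that contains va, as the arm backend does. */
static uint32_t
l2l_to_4k_frame(uint32_t pte, uint32_t va)
{
	return ((pte & L2_L_FRAME) + (va & L2_L_OFFSET & L2_S_FRAME));
}

int
main(void)
{
	/* Illustrative: 64K page at 0x80010000, va 0x5678 into it. */
	assert(l2l_to_4k_frame(0x80010000u | 0x1, 0xc0015678u) ==
	    0x80010000u + 0x5000u);
	return (0);
}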
static struct kvm_arch kvm_arm_minidump = { .ka_probe = _arm_minidump_probe, .ka_initvtop = _arm_minidump_initvtop, .ka_freevtop = _arm_minidump_freevtop, .ka_kvatop = _arm_minidump_kvatop, .ka_native = _arm_native, .ka_walk_pages = _arm_minidump_walk_pages, }; KVM_ARCH(kvm_arm_minidump);
Index: head/lib/libkvm/kvm_minidump_i386.c
===================================================================
--- head/lib/libkvm/kvm_minidump_i386.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_i386.c (revision 368307)
@@ -1,347 +1,346 @@
/*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * i386 machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include #include "../../sys/i386/include/minidump.h" #include #include "kvm_private.h" #include "kvm_i386.h" #define i386_round_page(x) roundup2((kvaddr_t)(x), I386_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; }; static i386_pte_pae_t _i386_pte_pae_get(kvm_t *kd, u_long pteindex) { i386_pte_pae_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le64toh(*pte); } static i386_pte_t _i386_pte_get(kvm_t *kd, u_long pteindex) { i386_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le32toh(*pte); } static int _i386_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && _kvm_is_minidump(kd)); } static void _i386_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _i386_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. 
expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize); vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase); vmst->hdr.paemode = le32toh(vmst->hdr.paemode); vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ? le32toh(vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + i386_round_page(vmst->hdr.dumpavailsize); sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) + i386_round_page(vmst->hdr.ptesize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE, - sizeof(uint32_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE) == -1) { return (-1); } off += i386_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) { return (-1); } off += i386_round_page(vmst->hdr.ptesize); return (0); } static int _i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_pae_t offset; i386_pte_pae_t pte; kvaddr_t pteindex; i386_physaddr_pae_t a; off_t ofs; vm = kd->vmst; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(pte)) goto invalid; pte = _i386_pte_pae_get(kd, pteindex); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME_PAE; ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_t offset; i386_pte_t pte; kvaddr_t pteindex; i386_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(pte)) goto invalid; pte = _i386_pte_get(kd, pteindex); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME; ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!"); return (0); } if (kd->vmst->hdr.paemode) return (_i386_minidump_vatop_pae(kd, va, pa)); else return (_i386_minidump_vatop(kd, va, pa)); } static vm_prot_t _i386_entry_to_prot(uint64_t pte) { vm_prot_t prot = VM_PROT_READ; /* Source: i386/pmap.c:pmap_protect() */ if (pte 
& I386_PG_RW) prot |= VM_PROT_WRITE; if ((pte & I386_PG_NX) == 0) prot |= VM_PROT_EXECUTE; return prot; } struct i386_iter { kvm_t *kd; u_long nptes; u_long pteindex; }; static void _i386_iterator_init(struct i386_iter *it, kvm_t *kd) { struct vmstate *vm = kd->vmst; it->kd = kd; it->pteindex = 0; if (vm->hdr.paemode) { it->nptes = vm->hdr.ptesize / sizeof(i386_pte_pae_t); } else { it->nptes = vm->hdr.ptesize / sizeof(i386_pte_t); } return; } static int _i386_iterator_next(struct i386_iter *it, u_long *pa, u_long *va, u_long *dva, vm_prot_t *prot) { struct vmstate *vm = it->kd->vmst; i386_pte_t pte32; i386_pte_pae_t pte64; int found = 0; *pa = 0; *va = 0; *dva = 0; *prot = 0; for (; it->pteindex < it->nptes && found == 0; it->pteindex++) { if (vm->hdr.paemode) { pte64 = _i386_pte_pae_get(it->kd, it->pteindex); if ((pte64 & I386_PG_V) == 0) continue; *prot = _i386_entry_to_prot(pte64); *pa = pte64 & I386_PG_FRAME_PAE; } else { pte32 = _i386_pte_get(it->kd, it->pteindex); if ((pte32 & I386_PG_V) == 0) continue; *prot = _i386_entry_to_prot(pte32); *pa = pte32 & I386_PG_FRAME; } *va = vm->hdr.kernbase + (it->pteindex << I386_PAGE_SHIFT); found = 1; } return found; } static int _i386_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct i386_iter it; u_long dva, pa, va; vm_prot_t prot; _i386_iterator_init(&it, kd); while (_i386_iterator_next(&it, &pa, &va, &dva, &prot)) { if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, I386_PAGE_SIZE, 0)) { return (0); } } return (1); } static struct kvm_arch kvm_i386_minidump = { .ka_probe = _i386_minidump_probe, .ka_initvtop = _i386_minidump_initvtop, .ka_freevtop = _i386_minidump_freevtop, .ka_kvatop = _i386_minidump_kvatop, .ka_native = _i386_native, .ka_walk_pages = _i386_minidump_walk_pages, }; KVM_ARCH(kvm_i386_minidump);
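The walker above is driven by a small resumable iterator: the scan position persists in struct i386_iter, and _i386_iterator_next() reports 0 once the PTE array is exhausted, which keeps the PAE and non-PAE PTE widths behind one interface. The same shape reduced to a hedged toy (assumed simplifications: a flat array and a bare valid bit, no PAE split):

#include <stdint.h>
#include <stdio.h>

struct toy_iter {
	const uint32_t *ptes;
	unsigned long nptes, idx;
};

/* Return 1 and the next valid entry, or 0 when the array is exhausted. */
static int
toy_next(struct toy_iter *it, uint32_t *pte_out)
{
	for (; it->idx < it->nptes; it->idx++) {
		if ((it->ptes[it->idx] & 0x1) == 0)	/* PG_V analogue */
			continue;
		*pte_out = it->ptes[it->idx++];	/* resume past this entry */
		return (1);
	}
	return (0);
}

int
main(void)
{
	const uint32_t ptes[] = { 0x1001, 0x0, 0x2001 };
	struct toy_iter it = { ptes, 3, 0 };
	uint32_t pte;

	while (toy_next(&it, &pte))
		printf("valid pte 0x%x\n", pte);
	return (0);
}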
Index: head/lib/libkvm/kvm_minidump_mips.c
===================================================================
--- head/lib/libkvm/kvm_minidump_mips.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_mips.c (revision 368307)
@@ -1,373 +1,372 @@
/*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_arm.c r214223 */ #include __FBSDID("$FreeBSD$"); /* * MIPS machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include #include "../../sys/mips/include/cca.h" #define _KVM_MINIDUMP #include "../../sys/mips/include/cpuregs.h" #include "../../sys/mips/include/minidump.h" #include "kvm_private.h" #include "kvm_mips.h" #define mips_round_page(x) roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; int pte_size; }; static int _mips_minidump_probe(kvm_t *kd) { if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 && kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); if (kd->nlehdr.e_machine != EM_MIPS) return (0); return (_kvm_is_minidump(kd)); } static void _mips_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _mips_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 || kd->nlehdr.e_flags & EF_MIPS_ABI2) vmst->pte_size = 64; else vmst->pte_size = 32; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase); vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase); vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend); vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
_kvm32toh(kd, vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + mips_round_page(vmst->hdr.dumpavailsize); sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) + mips_round_page(vmst->hdr.ptesize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE, - sizeof(uint32_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE) == -1) { return (-1); } off += mips_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) { return (-1); } off += mips_round_page(vmst->hdr.ptesize); return (0); } static int _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; mips_physaddr_t offset, a; kvaddr_t pteindex; u_long valid; off_t ofs; mips32_pte_t pte32; mips64_pte_t pte64; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!"); return (0); } offset = va & MIPS_PAGE_MASK; /* Operate with page-aligned address */ va &= ~MIPS_PAGE_MASK; vm = kd->vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) { a = va & MIPS_XKPHYS_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } else { if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT; if (vm->pte_size == 64) { if (pteindex >= vm->hdr.ptesize / sizeof(pte64)) goto invalid; pte64 = _mips64_pte_get(kd, pteindex); valid = pte64 & MIPS_PTE_V; if (valid) a = MIPS64_PTE_TO_PA(pte64); } else { if (pteindex >= vm->hdr.ptesize / sizeof(pte32)) goto invalid; pte32 = _mips32_pte_get(kd, pteindex); valid = pte32 & MIPS_PTE_V; if (valid) a = MIPS32_PTE_TO_PA(pte32); } if (!valid) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte " "not valid"); goto invalid; } } else { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual " "address 0x%jx not minidumped", (uintmax_t)va); return (0); } found: ofs = _kvm_pt_find(kd, a, MIPS_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical " "address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (MIPS_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int #ifdef __mips__ _mips_native(kvm_t *kd) #else _mips_native(kvm_t *kd __unused) #endif { #ifdef __mips__ #ifdef __mips_n64 if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); #else if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32) return (0); #ifdef __mips_n32 if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2)) return (0); #else if (kd->nlehdr.e_flags & EF_MIPS_ABI2) return (0); #endif #endif #if _BYTE_ORDER == _LITTLE_ENDIAN return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); #else return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); #endif #else return (0); #endif } struct mips_iter { kvm_t *kd; u_long nptes; u_long pteindex; }; static void _mips_iterator_init(struct mips_iter *it, kvm_t *kd) { struct vmstate *vm = kd->vmst; it->kd = kd; it->pteindex = 
0; if (vm->pte_size == 64) it->nptes = vm->hdr.ptesize / sizeof(mips64_pte_t); else it->nptes = vm->hdr.ptesize / sizeof(mips32_pte_t); return; } static int _mips_iterator_next(struct mips_iter *it, u_long *pa, u_long *va, u_long *dva, vm_prot_t *prot) { struct vmstate *vm = it->kd->vmst; int found = 0; mips64_pte_t pte64; mips32_pte_t pte32; /* * mips/mips/pmap.c: init_pte_prot / pmap_protect indicate that all * pages are R|X at least. */ *prot = VM_PROT_READ | VM_PROT_EXECUTE; *pa = 0; *va = 0; *dva = 0; for (; it->pteindex < it->nptes && found == 0; it->pteindex++) { if (vm->pte_size == 64) { pte64 = _mips64_pte_get(it->kd, it->pteindex); if ((pte64 & MIPS_PTE_V) == 0) continue; if ((pte64 & MIPS64_PTE_RO) == 0) *prot |= VM_PROT_WRITE; *pa = MIPS64_PTE_TO_PA(pte64); } else { pte32 = _mips32_pte_get(it->kd, it->pteindex); if ((pte32 & MIPS_PTE_V) == 0) continue; if ((pte32 & MIPS32_PTE_RO) == 0) *prot |= VM_PROT_WRITE; *pa = MIPS32_PTE_TO_PA(pte32); } *va = vm->hdr.kernbase + (it->pteindex << MIPS_PAGE_SHIFT); found = 1; /* advance pteindex regardless */ } return found; } static int _mips_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct mips_iter it; u_long dva, pa, va; vm_prot_t prot; /* Generate direct mapped entries; need page entries for prot etc? */ if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { /* MIPS_XKPHYS_START..MIPS_XKPHYS_END */ /* MIPS64_KSEG0_START..MIPS64_KSEG0_END */ /* MIPS64_KSEG1_START..MIPS64_KSEG1_END */ } else { /* MIPS32_KSEG0_START..MIPS32_KSEG0_END */ /* MIPS32_KSEG1_START..MIPS32_KSEG1_END */ } _mips_iterator_init(&it, kd); while (_mips_iterator_next(&it, &pa, &va, &dva, &prot)) { if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, MIPS_PAGE_SIZE, 0)) { return (0); } } return (1); } static struct kvm_arch kvm_mips_minidump = { .ka_probe = _mips_minidump_probe, .ka_initvtop = _mips_minidump_initvtop, .ka_freevtop = _mips_minidump_freevtop, .ka_kvatop = _mips_minidump_kvatop, .ka_native = _mips_native, .ka_walk_pages = _mips_minidump_walk_pages, }; KVM_ARCH(kvm_mips_minidump);
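MIPS is the odd one out above: before consulting PTEs, kvatop first checks the fixed kernel segments (KSEG0/KSEG1, plus XKPHYS on 64-bit), which map low physical memory at constant virtual offsets, so translation there is a mask rather than a page-table lookup. A compact sketch of the 32-bit case with assumed illustrative bounds (the real limits come from cpuregs.h):

#include <assert.h>
#include <stdint.h>

#define MIPS32_KSEG0_START 0x80000000u
#define MIPS32_KSEG0_END   0x9fffffffu
#define MIPS32_KSEG1_START 0xa0000000u
#define MIPS32_KSEG1_END   0xbfffffffu
#define KSEG_PHYS_MASK     0x1fffffffu	/* each window spans 512MB */

static int
kseg_va_to_pa(uint32_t va, uint32_t *pa)
{
	if (va >= MIPS32_KSEG0_START && va <= MIPS32_KSEG0_END) {
		*pa = va & KSEG_PHYS_MASK;	/* cached window */
		return (0);
	}
	if (va >= MIPS32_KSEG1_START && va <= MIPS32_KSEG1_END) {
		*pa = va & KSEG_PHYS_MASK;	/* uncached window */
		return (0);
	}
	return (-1);			/* fall back to the PTE array */
}

int
main(void)
{
	uint32_t pa;

	assert(kseg_va_to_pa(0x80123456u, &pa) == 0 && pa == 0x123456u);
	return (0);
}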
Index: head/lib/libkvm/kvm_minidump_powerpc64.c
===================================================================
--- head/lib/libkvm/kvm_minidump_powerpc64.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_powerpc64.c (revision 368307)
@@ -1,213 +1,212 @@
/*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2019 Leandro Lupori * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_riscv.c */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include "../../sys/powerpc/include/minidump.h" #include "kvm_private.h" #include "kvm_powerpc64.h" static int _powerpc64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) && _kvm_is_minidump(kd)); } static void _powerpc64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm == NULL) return; if (PPC64_MMU_OPS(kd)) PPC64_MMU_OP(kd, cleanup); free(vm); kd->vmst = NULL; } static int _powerpc64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; struct minidumphdr *hdr; off_t dump_avail_off, bitmap_off, pmap_off, sparse_off; const char *mmu_name; /* Alloc VM */ vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } hdr = &vmst->hdr; kd->vmst = vmst; PPC64_MMU_OPS(kd) = NULL; /* Read minidump header */ if (pread(kd->pmfd, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) { _kvm_err(kd, kd->program, "cannot read minidump header"); goto failed; } /* Check magic */ if (strncmp(MINIDUMP_MAGIC, hdr->magic, sizeof(hdr->magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); goto failed; } /* Check version */ hdr->version = be32toh(hdr->version); if (hdr->version != MINIDUMP_VERSION && hdr->version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, hdr->version); goto failed; } /* Convert header fields to host endian */ hdr->msgbufsize = be32toh(hdr->msgbufsize); hdr->bitmapsize = be32toh(hdr->bitmapsize); hdr->pmapsize = be32toh(hdr->pmapsize); hdr->kernbase = be64toh(hdr->kernbase); hdr->kernend = be64toh(hdr->kernend); hdr->dmapbase = be64toh(hdr->dmapbase); hdr->dmapend = be64toh(hdr->dmapend); hdr->hw_direct_map = be32toh(hdr->hw_direct_map); hdr->startkernel = be64toh(hdr->startkernel); hdr->endkernel = be64toh(hdr->endkernel); hdr->dumpavailsize = hdr->version == MINIDUMP_VERSION ?
be32toh(hdr->dumpavailsize) : 0; vmst->kimg_start = PPC64_KERNBASE; vmst->kimg_end = PPC64_KERNBASE + hdr->endkernel - hdr->startkernel; /* dump header */ dprintf("%s: mmu_name=%s,\n\t" "msgbufsize=0x%jx, bitmapsize=0x%jx, pmapsize=0x%jx, " "kernbase=0x%jx, kernend=0x%jx,\n\t" "dmapbase=0x%jx, dmapend=0x%jx, hw_direct_map=%d, " "startkernel=0x%jx, endkernel=0x%jx\n\t" "kimg_start=0x%jx, kimg_end=0x%jx\n", __func__, hdr->mmu_name, (uintmax_t)hdr->msgbufsize, (uintmax_t)hdr->bitmapsize, (uintmax_t)hdr->pmapsize, (uintmax_t)hdr->kernbase, (uintmax_t)hdr->kernend, (uintmax_t)hdr->dmapbase, (uintmax_t)hdr->dmapend, hdr->hw_direct_map, hdr->startkernel, hdr->endkernel, (uintmax_t)vmst->kimg_start, (uintmax_t)vmst->kimg_end); /* Detect and initialize MMU */ mmu_name = hdr->mmu_name; if (strcmp(mmu_name, PPC64_MMU_G5) == 0 || strcmp(mmu_name, PPC64_MMU_PHYP) == 0) PPC64_MMU_OPS(kd) = ppc64_mmu_ops_hpt; else { _kvm_err(kd, kd->program, "unsupported MMU: %s", mmu_name); goto failed; } if (PPC64_MMU_OP(kd, init) == -1) goto failed; /* Get dump parts' offsets */ dump_avail_off = PPC64_PAGE_SIZE + ppc64_round_page(hdr->msgbufsize); bitmap_off = dump_avail_off + ppc64_round_page(hdr->dumpavailsize); pmap_off = bitmap_off + ppc64_round_page(hdr->bitmapsize); sparse_off = pmap_off + ppc64_round_page(hdr->pmapsize); /* dump offsets */ dprintf("%s: msgbuf_off=0x%jx, bitmap_off=0x%jx, pmap_off=0x%jx, " "sparse_off=0x%jx\n", __func__, (uintmax_t)PPC64_PAGE_SIZE, (uintmax_t)bitmap_off, (uintmax_t)pmap_off, (uintmax_t)sparse_off); /* build physical address lookup table for sparse pages */ if (_kvm_pt_init(kd, hdr->dumpavailsize, dump_avail_off, - hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE, - sizeof(uint64_t)) == -1) + hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE) == -1) goto failed; if (_kvm_pmap_init(kd, hdr->pmapsize, pmap_off) == -1) goto failed; return (0); failed: _powerpc64_minidump_freevtop(kd); return (-1); } static int _powerpc64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "%s called in live kernel!", __func__); return (0); } return (PPC64_MMU_OP(kd, kvatop, va, pa)); } static int _powerpc64_native(kvm_t *kd __unused) { #ifdef __powerpc64__ return (1); #else return (0); #endif } static kssize_t _powerpc64_kerndisp(kvm_t *kd) { return (kd->vmst->hdr.startkernel - PPC64_KERNBASE); } static int _powerpc64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { return (PPC64_MMU_OP(kd, walk_pages, cb, arg)); } static struct kvm_arch kvm_powerpc64_minidump = { .ka_probe = _powerpc64_minidump_probe, .ka_initvtop = _powerpc64_minidump_initvtop, .ka_freevtop = _powerpc64_minidump_freevtop, .ka_kvatop = _powerpc64_minidump_kvatop, .ka_walk_pages = _powerpc64_minidump_walk_pages, .ka_native = _powerpc64_native, .ka_kerndisp = _powerpc64_kerndisp, }; KVM_ARCH(kvm_powerpc64_minidump);
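The powerpc64 backend above is the only one that indirects through an MMU-ops table: the dump header names the MMU, initvtop selects an implementation (the HPT ops for the G5 and PHYP cases), and later kvatop/walk_pages calls dispatch through PPC64_MMU_OP(). A hedged toy showing only the shape of that dispatch; the names, strings, and signatures here are illustrative assumptions, not libkvm's:

#include <stdio.h>
#include <string.h>

struct mmu_ops {
	const char *name;
	int (*kvatop)(unsigned long va, unsigned long *pa);
};

static int
hpt_kvatop(unsigned long va, unsigned long *pa)
{
	*pa = va;	/* placeholder translation for the sketch */
	return (1);
}

static const struct mmu_ops known_ops[] = {
	{ "mmu_g5", hpt_kvatop },
	{ "mmu_phyp", hpt_kvatop },
};

/* Pick an ops table from the MMU name carried in the dump header. */
static const struct mmu_ops *
pick_ops(const char *mmu_name)
{
	for (size_t i = 0; i < sizeof(known_ops) / sizeof(known_ops[0]); i++)
		if (strcmp(known_ops[i].name, mmu_name) == 0)
			return (&known_ops[i]);
	return (NULL);	/* unsupported MMU: initvtop would fail */
}

int
main(void)
{
	const struct mmu_ops *ops = pick_ops("mmu_g5");
	unsigned long pa;

	if (ops != NULL && ops->kvatop(0x1000, &pa))
		printf("pa=0x%lx via %s\n", pa, ops->name);
	return (0);
}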
Index: head/lib/libkvm/kvm_minidump_riscv.c
===================================================================
--- head/lib/libkvm/kvm_minidump_riscv.c (revision 368306)
+++ head/lib/libkvm/kvm_minidump_riscv.c (revision 368307)
@@ -1,296 +1,295 @@
/*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2019 Mitchell Horne * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799 */ #include __FBSDID("$FreeBSD$"); /* * RISC-V machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include "../../sys/riscv/include/minidump.h" #include #include "kvm_private.h" #include "kvm_riscv.h" #define riscv_round_page(x) roundup2((kvaddr_t)(x), RISCV_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; }; static riscv_pt_entry_t _riscv_pte_get(kvm_t *kd, u_long pteindex) { riscv_pt_entry_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte)); return le64toh(*pte); } static int _riscv_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_RISCV) && _kvm_is_minidump(kd)); } static void _riscv_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _riscv_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, dump_avail_off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
le32toh(vmst->hdr.dumpavailsize) : 0; /* Skip header and msgbuf */ dump_avail_off = RISCV_PAGE_SIZE + riscv_round_page(vmst->hdr.msgbufsize); /* Skip dump_avail */ off = dump_avail_off + riscv_round_page(vmst->hdr.dumpavailsize); /* build physical address lookup table for sparse pages */ sparse_off = off + riscv_round_page(vmst->hdr.bitmapsize) + riscv_round_page(vmst->hdr.pmapsize); if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off, - vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE, - sizeof(uint64_t)) == -1) { + vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE) == -1) { return (-1); } off += riscv_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) { return (-1); } off += riscv_round_page(vmst->hdr.pmapsize); return (0); } static int _riscv_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; riscv_physaddr_t offset; riscv_pt_entry_t l3; kvaddr_t l3_index; riscv_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & RISCV_PAGE_MASK; if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & ~RISCV_PAGE_MASK; ofs = _kvm_pt_find(kd, a, RISCV_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_riscv_minidump_vatop: " "direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (RISCV_PAGE_SIZE - offset); } else if (va >= vm->hdr.kernbase) { l3_index = (va - vm->hdr.kernbase) >> RISCV_L3_SHIFT; if (l3_index >= vm->hdr.pmapsize / sizeof(l3)) goto invalid; l3 = _riscv_pte_get(kd, l3_index); if ((l3 & RISCV_PTE_V) == 0 || (l3 & RISCV_PTE_RWX) == 0) { _kvm_err(kd, kd->program, "_riscv_minidump_vatop: pte not valid"); goto invalid; } a = (l3 >> RISCV_PTE_PPN0_S) << RISCV_L3_SHIFT; ofs = _kvm_pt_find(kd, a, RISCV_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_riscv_minidump_vatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (RISCV_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_riscv_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _riscv_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_riscv_minidump_kvatop called in live kernel!"); return (0); } return (_riscv_minidump_vatop(kd, va, pa)); } static int _riscv_native(kvm_t *kd __unused) { #ifdef __riscv return (1); #else return (0); #endif } static vm_prot_t _riscv_entry_to_prot(riscv_pt_entry_t pte) { vm_prot_t prot = VM_PROT_READ; if ((pte & RISCV_PTE_W) != 0) prot |= VM_PROT_WRITE; if ((pte & RISCV_PTE_X) != 0) prot |= VM_PROT_EXECUTE; return prot; } static int _riscv_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct vmstate *vm = kd->vmst; u_long nptes = vm->hdr.pmapsize / sizeof(riscv_pt_entry_t); u_long bmindex, dva, pa, pteindex, va; struct kvm_bitmap bm; vm_prot_t prot; int ret = 0; if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex)) return (0); for (pteindex = 0; pteindex < nptes; pteindex++) { riscv_pt_entry_t pte = _riscv_pte_get(kd, pteindex); if (((pte & RISCV_PTE_V) == 0) || ((pte & RISCV_PTE_RWX) == 0)) continue; va = vm->hdr.kernbase + (pteindex << RISCV_L3_SHIFT); pa = (pte >> RISCV_PTE_PPN0_S) << RISCV_L3_SHIFT; dva = vm->hdr.dmapbase + pa; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, _riscv_entry_to_prot(pte), RISCV_PAGE_SIZE, 0)) { goto out; } } while (_kvm_bitmap_next(&bm, &bmindex)) 
{ pa = _kvm_bit_id_pa(kd, bmindex, RISCV_PAGE_SIZE); if (pa == _KVM_PA_INVALID) break; dva = vm->hdr.dmapbase + pa; if (vm->hdr.dmapend < (dva + RISCV_PAGE_SIZE)) break; va = 0; prot = VM_PROT_READ | VM_PROT_WRITE; if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, RISCV_PAGE_SIZE, 0)) { goto out; } } ret = 1; out: _kvm_bitmap_deinit(&bm); return (ret); } static struct kvm_arch kvm_riscv_minidump = { .ka_probe = _riscv_minidump_probe, .ka_initvtop = _riscv_minidump_initvtop, .ka_freevtop = _riscv_minidump_freevtop, .ka_kvatop = _riscv_minidump_kvatop, .ka_native = _riscv_native, .ka_walk_pages = _riscv_minidump_walk_pages, }; KVM_ARCH(kvm_riscv_minidump); Index: head/lib/libkvm/kvm_private.c =================================================================== --- head/lib/libkvm/kvm_private.c (revision 368306) +++ head/lib/libkvm/kvm_private.c (revision 368307) @@ -1,841 +1,827 @@ /*- * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #define _WANT_VNET #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "kvm_private.h" /* * Routines private to libkvm. */ /* from src/lib/libc/gen/nlist.c */ int __fdnlist(int, struct nlist *); /* * Report an error using printf style arguments. "program" is kd->program * on hard errors, and 0 on soft errors, so that under sun error emulation, * only hard errors are printed out (otherwise, programs like gdb will * generate tons of error messages when trying to access bogus pointers). */ void _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...) 
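/*
 * For example (illustrative): _kvm_err(kd, kd->program, ...) prints
 * "program: message" to stderr (a hard error), while a call such as
 * _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va) only
 * records the message in kd->errbuf, to be fetched with kvm_geterr(3).
 */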
{ va_list ap; va_start(ap, fmt); if (program != NULL) { (void)fprintf(stderr, "%s: ", program); (void)vfprintf(stderr, fmt, ap); (void)fputc('\n', stderr); } else (void)vsnprintf(kd->errbuf, sizeof(kd->errbuf), fmt, ap); va_end(ap); } void _kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...) { va_list ap; int n; va_start(ap, fmt); if (program != NULL) { (void)fprintf(stderr, "%s: ", program); (void)vfprintf(stderr, fmt, ap); (void)fprintf(stderr, ": %s\n", strerror(errno)); } else { char *cp = kd->errbuf; (void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap); n = strlen(cp); (void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s", strerror(errno)); } va_end(ap); } void * _kvm_malloc(kvm_t *kd, size_t n) { void *p; if ((p = calloc(n, sizeof(char))) == NULL) _kvm_err(kd, kd->program, "can't allocate %zu bytes: %s", n, strerror(errno)); return (p); } int _kvm_probe_elf_kernel(kvm_t *kd, int class, int machine) { return (kd->nlehdr.e_ident[EI_CLASS] == class && ((machine == EM_PPC || machine == EM_PPC64) ? kd->nlehdr.e_type == ET_DYN : kd->nlehdr.e_type == ET_EXEC) && kd->nlehdr.e_machine == machine); } int _kvm_is_minidump(kvm_t *kd) { char minihdr[8]; if (kd->rawdump) return (0); if (pread(kd->pmfd, &minihdr, 8, 0) == 8 && memcmp(&minihdr, "minidump", 8) == 0) return (1); return (0); } /* * The powerpc backend has a hack to strip a leading kerneldump * header from the core before treating it as an ELF header. * * We can add that here if we can get a change to libelf to support * an initial offset into the file. Alternatively we could patch * savecore to extract cores from a regular file instead. */ int _kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp) { GElf_Ehdr ehdr; GElf_Phdr *phdr; Elf *elf; size_t i, phnum; elf = elf_begin(kd->pmfd, ELF_C_READ, NULL); if (elf == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); return (-1); } if (elf_kind(elf) != ELF_K_ELF) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (gelf_getehdr(elf, &ehdr) == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } if (ehdr.e_type != ET_CORE) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (ehdr.e_machine != kd->nlehdr.e_machine) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (elf_getphdrnum(elf, &phnum) == -1) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } phdr = calloc(phnum, sizeof(*phdr)); if (phdr == NULL) { _kvm_err(kd, kd->program, "failed to allocate phdrs"); goto bad; } for (i = 0; i < phnum; i++) { if (gelf_getphdr(elf, i, &phdr[i]) == NULL) { free(phdr); _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } } elf_end(elf); *phnump = phnum; *phdrp = phdr; return (0); bad: elf_end(elf); return (-1); } /* * Transform v such that only bits [bit0, bitN) may be set. Generates a * bitmask covering the number of bits, then shifts so +bit0+ is the first. */ static uint64_t bitmask_range(uint64_t v, uint64_t bit0, uint64_t bitN) { if (bit0 == 0 && bitN == BITS_IN(v)) return (v); return (v & (((1ULL << (bitN - bit0)) - 1ULL) << bit0)); } /* * Returns the number of set bits in a given byte array range starting at a * given base, from bit0 to bitN. bit0 may be non-zero in the case of * counting backwards from bitN.
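 *
 * For example (illustrative): popcount_bytes(addr, 3, 130) counts bits
 * [3, 64) of addr[0], all 64 bits of addr[1], and bits [0, 2) of
 * addr[2], i.e. the population count of the bit range [3, 130).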
*/ static uint64_t popcount_bytes(uint64_t *addr, uint32_t bit0, uint32_t bitN) { uint32_t res = bitN - bit0; uint64_t count = 0; uint32_t bound; /* Align to 64-bit boundary on the left side if needed. */ if ((bit0 % BITS_IN(*addr)) != 0) { bound = MIN(bitN, roundup2(bit0, BITS_IN(*addr))); count += __bitcount64(bitmask_range(*addr, bit0, bound)); res -= (bound - bit0); addr++; } while (res > 0) { bound = MIN(res, BITS_IN(*addr)); count += __bitcount64(bitmask_range(*addr, 0, bound)); res -= bound; addr++; } return (count); } void * _kvm_pmap_get(kvm_t *kd, u_long idx, size_t len) { uintptr_t off = idx * len; if ((off_t)off >= kd->pt_sparse_off) return (NULL); return (void *)((uintptr_t)kd->page_map + off); } void * _kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size) { off_t off; uintptr_t addr; off = _kvm_pt_find(kd, pa, page_size); if (off == -1) return NULL; addr = (uintptr_t)kd->page_map + off; if (off >= kd->pt_sparse_off) addr = (uintptr_t)kd->sparse_map + (off - kd->pt_sparse_off); return (void *)addr; } int _kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off, - size_t map_len, off_t map_off, off_t sparse_off, int page_size, - int word_size) + size_t map_len, off_t map_off, off_t sparse_off, int page_size) { uint64_t *addr; uint32_t *popcount_bin; int bin_popcounts = 0; uint64_t pc_bins, res; ssize_t rd; kd->dump_avail_size = dump_avail_size; if (dump_avail_size > 0) { kd->dump_avail = mmap(NULL, kd->dump_avail_size, PROT_READ, MAP_PRIVATE, kd->pmfd, dump_avail_off); } else { /* * Older version minidumps don't provide dump_avail[], * so the bitmap is fully populated from 0 to * last_pa. Create an implied dump_avail that * expresses this. */ - kd->dump_avail = calloc(4, word_size); - if (word_size == sizeof(uint32_t)) { - ((uint32_t *)kd->dump_avail)[1] = _kvm32toh(kd, - map_len * 8 * page_size); - } else { - kd->dump_avail[1] = _kvm64toh(kd, - map_len * 8 * page_size); - } + kd->dump_avail = calloc(4, sizeof(uint64_t)); + kd->dump_avail[1] = _kvm64toh(kd, map_len * 8 * page_size); } /* * Map the bitmap specified by the arguments. */ kd->pt_map = _kvm_malloc(kd, map_len); if (kd->pt_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %zu bytes for bitmap", map_len); return (-1); } rd = pread(kd->pmfd, kd->pt_map, map_len, map_off); if (rd < 0 || rd != (ssize_t)map_len) { _kvm_err(kd, kd->program, "cannot read %zu bytes for bitmap", map_len); return (-1); } kd->pt_map_size = map_len; /* * Generate a popcount cache for every POPCOUNT_BITS in the bitmap, * so lookups only have to calculate the number of bits set between * a cache point and their bit. This reduces lookups to O(1), * without significantly increasing memory requirements. * * Round up the number of bins so that 'upper half' lookups work for * the final bin, if needed. The first popcount is 0, since no bits * precede bit 0, so add 1 for that also. Without this, extra work * would be needed to handle the first PTEs in _kvm_pt_find(). 
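 *
 * For example (illustrative): with POPCOUNT_BITS == 1024, the number of
 * set bits preceding bit 2500 is pt_popcounts[2] (bits set below bit
 * 2048) plus a popcount of bits [2048, 2500); the "upper half" rule
 * keeps any such scan under POPCOUNT_BITS / 2 bits.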
*/ addr = kd->pt_map; res = map_len; pc_bins = 1 + (res * NBBY + POPCOUNT_BITS / 2) / POPCOUNT_BITS; kd->pt_popcounts = calloc(pc_bins, sizeof(uint32_t)); if (kd->pt_popcounts == NULL) { _kvm_err(kd, kd->program, "cannot allocate popcount bins"); return (-1); } for (popcount_bin = &kd->pt_popcounts[1]; res > 0; addr++, res -= sizeof(*addr)) { *popcount_bin += popcount_bytes(addr, 0, MIN(res * NBBY, BITS_IN(*addr))); if (++bin_popcounts == POPCOUNTS_IN(*addr)) { popcount_bin++; *popcount_bin = *(popcount_bin - 1); bin_popcounts = 0; } } assert(pc_bins * sizeof(*popcount_bin) == ((uintptr_t)popcount_bin - (uintptr_t)kd->pt_popcounts)); kd->pt_sparse_off = sparse_off; kd->pt_sparse_size = (uint64_t)*popcount_bin * page_size; kd->pt_page_size = page_size; - kd->pt_word_size = word_size; /* * Map the sparse page array. This is useful for performing point * lookups of specific pages, e.g. for kvm_walk_pages. Generally, * this is much larger than is reasonable to read in up front, so * mmap it in instead. */ kd->sparse_map = mmap(NULL, kd->pt_sparse_size, PROT_READ, MAP_PRIVATE, kd->pmfd, kd->pt_sparse_off); if (kd->sparse_map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map %" PRIu64 " bytes from fd %d offset %jd for sparse map: %s", kd->pt_sparse_size, kd->pmfd, (intmax_t)kd->pt_sparse_off, strerror(errno)); return (-1); } return (0); } int _kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off) { ssize_t exp_len = pmap_size; kd->page_map_size = pmap_size; kd->page_map_off = pmap_off; kd->page_map = _kvm_malloc(kd, pmap_size); if (kd->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %u bytes " "for page map", pmap_size); return (-1); } if (pread(kd->pmfd, kd->page_map, pmap_size, pmap_off) != exp_len) { _kvm_err(kd, kd->program, "cannot read %d bytes from " "offset %jd for page map", pmap_size, (intmax_t)pmap_off); return (-1); } return (0); } static inline uint64_t dump_avail_n(kvm_t *kd, long i) { - uint32_t *d32; - - if (kd->pt_word_size == sizeof(uint32_t)) { - d32 = (uint32_t *)kd->dump_avail; - return (_kvm32toh(kd, d32[i])); - } else - return (_kvm64toh(kd, kd->dump_avail[i])); + return (_kvm64toh(kd, kd->dump_avail[i])); } uint64_t _kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size) { uint64_t adj; long i; adj = 0; for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) { if (pa >= dump_avail_n(kd, i + 1)) { adj += howmany(dump_avail_n(kd, i + 1), page_size) - dump_avail_n(kd, i) / page_size; } else { return (pa / page_size - dump_avail_n(kd, i) / page_size + adj); } } return (_KVM_BIT_ID_INVALID); } uint64_t _kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size) { uint64_t sz; long i; for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) { sz = howmany(dump_avail_n(kd, i + 1), page_size) - dump_avail_n(kd, i) / page_size; if (bit_id < sz) { return (rounddown2(dump_avail_n(kd, i), page_size) + bit_id * page_size); } bit_id -= sz; } return (_KVM_PA_INVALID); } /* * Find the offset for the given physical page address; returns -1 otherwise. * * A page's offset is represented by the sparse page base offset plus the * number of bits set before its bit multiplied by page size. This means * that if a page exists in the dump, it's necessary to know how many pages * in the dump precede it. Reduce this O(n) counting to O(1) by caching the * number of bits set at POPCOUNT_BITS intervals. * * Then to find the number of pages before the requested address, simply * index into the cache and count the number of bits set between that cache * bin and the page's bit. 
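 *
 * For example (illustrative): if nine set bits precede a page's bit,
 * its data lives at pt_sparse_off + 9 * page_size.
 *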
Halve the number of bytes that have to be * checked by also counting down from the next higher bin if it's closer. */ off_t _kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size) { uint64_t *bitmap = kd->pt_map; uint64_t pte_bit_id = _kvm_pa_bit_id(kd, pa, page_size); uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap); uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS; uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap)); uint64_t bitN; uint32_t count; /* Check whether the page address requested is in the dump. */ if (pte_bit_id == _KVM_BIT_ID_INVALID || pte_bit_id >= (kd->pt_map_size * NBBY) || (bitmap[pte_u64] & pte_mask) == 0) return (-1); /* * Add/sub popcounts from the bitmap until the PTE's bit is reached. * For bits that are in the upper half between the calculated * popcount id and the next one, use the next one and subtract to * minimize the number of popcounts required. */ if ((pte_bit_id % POPCOUNT_BITS) < (POPCOUNT_BITS / 2)) { count = kd->pt_popcounts[popcount_id] + popcount_bytes( bitmap + popcount_id * POPCOUNTS_IN(*bitmap), 0, pte_bit_id - popcount_id * POPCOUNT_BITS); } else { /* * Counting in reverse is trickier, since we must avoid * reading from bytes that are not in range, and invert. */ uint64_t pte_u64_bit_off = pte_u64 * BITS_IN(*bitmap); popcount_id++; bitN = MIN(popcount_id * POPCOUNT_BITS, kd->pt_map_size * BITS_IN(uint8_t)); count = kd->pt_popcounts[popcount_id] - popcount_bytes( bitmap + pte_u64, pte_bit_id - pte_u64_bit_off, bitN - pte_u64_bit_off); } /* * This can only happen if the core is truncated. Treat these * entries as if they don't exist, since their backing doesn't. */ if (count >= (kd->pt_sparse_size / page_size)) return (-1); return (kd->pt_sparse_off + (uint64_t)count * page_size); } static int kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list) { kvaddr_t addr; int error, nfail; if (kd->resolve_symbol == NULL) { struct nlist *nl; int count, i; for (count = 0; list[count].n_name != NULL && list[count].n_name[0] != '\0'; count++) ; nl = calloc(count + 1, sizeof(*nl)); if (nl == NULL) return (-1); for (i = 0; i < count; i++) nl[i].n_name = list[i].n_name; nfail = __fdnlist(kd->nlfd, nl); for (i = 0; i < count; i++) { list[i].n_type = nl[i].n_type; list[i].n_value = nl[i].n_value; } free(nl); return (nfail); } nfail = 0; while (list->n_name != NULL && list->n_name[0] != '\0') { error = kd->resolve_symbol(list->n_name, &addr); if (error != 0) { nfail++; list->n_value = 0; list->n_type = 0; } else { list->n_value = addr; list->n_type = N_DATA | N_EXT; } list++; } return (nfail); } /* * Walk the list of unresolved symbols, generate a new list and prefix the * symbol names, try again, and merge back what we could resolve. */ static int kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing, const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t)) { struct kvm_nlist *n, *np, *p; char *cp, *ce; const char *ccp; size_t len; int slen, unresolved; /* * Calculate the space we need to malloc for nlist and names. * We are going to store the name twice for later lookups: once * with the prefix and once the unmodified name delimited by \0. */ len = 0; unresolved = 0; for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; len += sizeof(struct kvm_nlist) + strlen(prefix) + 2 * (strlen(p->n_name) + 1); unresolved++; } if (unresolved == 0) return (unresolved); /* Add space for the terminating nlist entry. */ len += sizeof(struct kvm_nlist); unresolved++; /* Alloc one chunk for (nlist, [names]) and setup pointers.
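 *
 * Illustrative layout of the chunk allocated below (symbol names are
 * hypothetical):
 *
 *	[kvm_nlist array]["vnet_entry_foo\0foo\0"]["vnet_entry_bar\0bar\0"]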
*/ n = np = malloc(len); if (n == NULL) return (missing); bzero(n, len); cp = ce = (char *)np; cp += unresolved * sizeof(struct kvm_nlist); ce += len; /* Generate shortened nlist with special prefix. */ unresolved = 0; for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; *np = *p; /* Save the new\0orig. name so we can later match it again. */ slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix, (prefix[0] != '\0' && p->n_name[0] == '_') ? (p->n_name + 1) : p->n_name, '\0', p->n_name); if (slen < 0 || slen >= ce - cp) continue; np->n_name = cp; cp += slen + 1; np++; unresolved++; } /* Do lookup on the reduced list. */ np = n; unresolved = kvm_fdnlist(kd, np); /* Check if we could resolve further symbols and update the list. */ if (unresolved >= 0 && unresolved < missing) { /* Find the first freshly resolved entry. */ for (; np->n_name && np->n_name[0]; np++) if (np->n_type != N_UNDF) break; /* * The lists are both in the same order, * so we can walk them in parallel. */ for (p = nl; np->n_name && np->n_name[0] && p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; /* Skip expanded name and compare to orig. one. */ ccp = np->n_name + strlen(np->n_name) + 1; if (strcmp(ccp, p->n_name) != 0) continue; /* Update nlist with new, translated results. */ p->n_type = np->n_type; if (validate_fn) p->n_value = (*validate_fn)(kd, np->n_value); else p->n_value = np->n_value; missing--; /* Find next freshly resolved entry. */ for (np++; np->n_name && np->n_name[0]; np++) if (np->n_type != N_UNDF) break; } } /* We could assert missing == unresolved here. */ free(n); return (unresolved); } int _kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize) { struct kvm_nlist *p; int nvalid; struct kld_sym_lookup lookup; int error; const char *prefix = ""; char symname[1024]; /* XXX-BZ symbol name length limit? */ int tried_vnet, tried_dpcpu; /* * If we can't use the kld symbol lookup, revert to the * slow library call. */ if (!ISALIVE(kd)) { error = kvm_fdnlist(kd, nl); if (error <= 0) /* Hard error or success. */ return (error); if (_kvm_vnet_initialized(kd, initialize)) error = kvm_fdnlist_prefix(kd, nl, error, VNET_SYMPREFIX, _kvm_vnet_validaddr); if (error > 0 && _kvm_dpcpu_initialized(kd, initialize)) error = kvm_fdnlist_prefix(kd, nl, error, DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr); return (error); } /* * We can use the kld lookup syscall. Go through each nlist entry * and look it up with a kldsym(2) syscall. */ nvalid = 0; tried_vnet = 0; tried_dpcpu = 0; again: for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; lookup.version = sizeof(lookup); lookup.symvalue = 0; lookup.symsize = 0; error = snprintf(symname, sizeof(symname), "%s%s", prefix, (prefix[0] != '\0' && p->n_name[0] == '_') ? (p->n_name + 1) : p->n_name); if (error < 0 || error >= (int)sizeof(symname)) continue; lookup.symname = symname; if (lookup.symname[0] == '_') lookup.symname++; if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) { p->n_type = N_TEXT; if (_kvm_vnet_initialized(kd, initialize) && strcmp(prefix, VNET_SYMPREFIX) == 0) p->n_value = _kvm_vnet_validaddr(kd, lookup.symvalue); else if (_kvm_dpcpu_initialized(kd, initialize) && strcmp(prefix, DPCPU_SYMPREFIX) == 0) p->n_value = _kvm_dpcpu_validaddr(kd, lookup.symvalue); else p->n_value = lookup.symvalue; ++nvalid; /* lookup.symsize */ } } /* * Check the number of entries that weren't found. If they exist, * try again with a prefix for virtualized or DPCPU symbol names.
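 *
 * For example (illustrative): a symbol "if_index" that was not found is
 * retried as VNET_SYMPREFIX "if_index" ("vnet_entry_if_index"), and a
 * hit is then translated through _kvm_vnet_validaddr().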
*/ error = ((p - nl) - nvalid); if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) { tried_vnet = 1; prefix = VNET_SYMPREFIX; goto again; } if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) { tried_dpcpu = 1; prefix = DPCPU_SYMPREFIX; goto again; } /* * Return the number of entries that weren't found. If they exist, * also fill internal error buffer. */ error = ((p - nl) - nvalid); if (error) _kvm_syserr(kd, kd->program, "kvm_nlist"); return (error); } int _kvm_bitmap_init(struct kvm_bitmap *bm, u_long bitmapsize, u_long *idx) { *idx = ULONG_MAX; bm->map = calloc(bitmapsize, sizeof *bm->map); if (bm->map == NULL) return (0); bm->size = bitmapsize; return (1); } void _kvm_bitmap_set(struct kvm_bitmap *bm, u_long bm_index) { if (bm_index / 8 < bm->size) bm->map[bm_index / 8] |= (1UL << (bm_index % 8)); } int _kvm_bitmap_next(struct kvm_bitmap *bm, u_long *idx) { u_long first_invalid = bm->size * CHAR_BIT; if (*idx == ULONG_MAX) *idx = 0; else (*idx)++; /* Find the next valid (still clear) idx. */ for (; *idx < first_invalid; (*idx)++) { unsigned int mask = 1U << (*idx % CHAR_BIT); if ((bm->map[*idx / CHAR_BIT] & mask) == 0) break; } return (*idx < first_invalid); } void _kvm_bitmap_deinit(struct kvm_bitmap *bm) { free(bm->map); } int _kvm_visit_cb(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg, u_long pa, u_long kmap_vaddr, u_long dmap_vaddr, vm_prot_t prot, size_t len, unsigned int page_size) { unsigned int pgsz = page_size ? page_size : len; struct kvm_page p = { .kp_version = LIBKVM_WALK_PAGES_VERSION, .kp_paddr = pa, .kp_kmap_vaddr = kmap_vaddr, .kp_dmap_vaddr = dmap_vaddr, .kp_prot = prot, .kp_offset = _kvm_pt_find(kd, pa, pgsz), .kp_len = len, }; return cb(&p, arg); } Index: head/lib/libkvm/kvm_private.h =================================================================== --- head/lib/libkvm/kvm_private.h (revision 368306) +++ head/lib/libkvm/kvm_private.h (revision 368307) @@ -1,199 +1,198 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)kvm_private.h 8.1 (Berkeley) 6/4/93 * $FreeBSD$ */ #include #include #include struct kvm_arch { int (*ka_probe)(kvm_t *); int (*ka_initvtop)(kvm_t *); void (*ka_freevtop)(kvm_t *); int (*ka_kvatop)(kvm_t *, kvaddr_t, off_t *); int (*ka_native)(kvm_t *); int (*ka_walk_pages)(kvm_t *, kvm_walk_pages_cb_t *, void *); kssize_t (*ka_kerndisp)(kvm_t *); }; #define KVM_ARCH(ka) DATA_SET(kvm_arch, ka) struct __kvm { struct kvm_arch *arch; /* * a string to be prepended to error messages * provided for compatibility with sun's interface * if this value is null, errors are saved in errbuf[] */ const char *program; char *errp; /* XXX this can probably go away */ char errbuf[_POSIX2_LINE_MAX]; #define ISALIVE(kd) ((kd)->vmfd >= 0) int pmfd; /* physical memory file (or crashdump) */ int vmfd; /* virtual memory file (-1 if crashdump) */ int nlfd; /* namelist file (e.g., /kernel) */ GElf_Ehdr nlehdr; /* ELF file header for namelist file */ int (*resolve_symbol)(const char *, kvaddr_t *); struct kinfo_proc *procbase; char *argspc; /* (dynamic) storage for argv strings */ int arglen; /* length of the above */ char **argv; /* (dynamic) storage for argv pointers */ int argc; /* length of above (not actual # present) */ char *argbuf; /* (dynamic) temporary storage */ /* * Kernel virtual address translation state. This only gets filled * in for dead kernels; otherwise, the running kernel (i.e. kmem) * will do the translations for us. It could be big, so we * only allocate it if necessary. */ struct vmstate *vmst; int rawdump; /* raw dump format */ int writable; /* physical memory is writable */ int vnet_initialized; /* vnet fields set up */ kvaddr_t vnet_start; /* start of kernel's vnet region */ kvaddr_t vnet_stop; /* stop of kernel's vnet region */ kvaddr_t vnet_current; /* vnet we're working with */ kvaddr_t vnet_base; /* vnet base of current vnet */ /* * Dynamic per-CPU kernel memory. We translate symbols, on-demand, * to the data associated with dpcpu_curcpu, set with * kvm_dpcpu_setcpu(). */ int dpcpu_initialized; /* dpcpu fields set up */ kvaddr_t dpcpu_start; /* start of kernel's dpcpu region */ kvaddr_t dpcpu_stop; /* stop of kernel's dpcpu region */ u_int dpcpu_maxcpus; /* size of base array */ uintptr_t *dpcpu_off; /* base array, indexed by CPU ID */ u_int dpcpu_curcpu; /* CPU we're currently working with */ kvaddr_t dpcpu_curoff; /* dpcpu base of current CPU */ /* Page table lookup structures. */ uint64_t *pt_map; size_t pt_map_size; uint64_t *dump_avail; /* always 64-bit entries */ size_t dump_avail_size; off_t pt_sparse_off; uint64_t pt_sparse_size; uint32_t *pt_popcounts; unsigned int pt_page_size; - unsigned int pt_word_size; /* Page & sparse map structures. */ void *page_map; uint32_t page_map_size; off_t page_map_off; void *sparse_map; }; struct kvm_bitmap { uint8_t *map; u_long size; }; /* Page table lookup constants.
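 *
 * With 64-bit bitmap words, BITS_IN(uint64_t) == 64 and
 * POPCOUNTS_IN(uint64_t) == 1024 / 64 == 16, i.e. each popcount bin
 * covers 16 bitmap words (128 bytes of bitmap, or 4 MB of dumped
 * memory at a 4 KB page size).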
*/ #define POPCOUNT_BITS 1024 #define BITS_IN(v) (sizeof(v) * NBBY) #define POPCOUNTS_IN(v) (POPCOUNT_BITS / BITS_IN(v)) /* * Functions used internally by kvm, but across kvm modules. */ static inline uint16_t _kvm16toh(kvm_t *kd, uint16_t val) { if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB) return (le16toh(val)); else return (be16toh(val)); } static inline uint32_t _kvm32toh(kvm_t *kd, uint32_t val) { if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB) return (le32toh(val)); else return (be32toh(val)); } static inline uint64_t _kvm64toh(kvm_t *kd, uint64_t val) { if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB) return (le64toh(val)); else return (be64toh(val)); } uint64_t _kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size); uint64_t _kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size); #define _KVM_PA_INVALID ULONG_MAX #define _KVM_BIT_ID_INVALID ULONG_MAX int _kvm_bitmap_init(struct kvm_bitmap *, u_long, u_long *); void _kvm_bitmap_set(struct kvm_bitmap *, u_long); int _kvm_bitmap_next(struct kvm_bitmap *, u_long *); void _kvm_bitmap_deinit(struct kvm_bitmap *); void _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...) __printflike(3, 4); void _kvm_freeprocs(kvm_t *kd); void *_kvm_malloc(kvm_t *kd, size_t); int _kvm_nlist(kvm_t *, struct kvm_nlist *, int); void *_kvm_realloc(kvm_t *kd, void *, size_t); void _kvm_syserr (kvm_t *kd, const char *program, const char *fmt, ...) __printflike(3, 4); int _kvm_vnet_selectpid(kvm_t *, pid_t); int _kvm_vnet_initialized(kvm_t *, int); kvaddr_t _kvm_vnet_validaddr(kvm_t *, kvaddr_t); int _kvm_dpcpu_initialized(kvm_t *, int); kvaddr_t _kvm_dpcpu_validaddr(kvm_t *, kvaddr_t); int _kvm_probe_elf_kernel(kvm_t *, int, int); int _kvm_is_minidump(kvm_t *); int _kvm_read_core_phdrs(kvm_t *, size_t *, GElf_Phdr **); -int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int, int); +int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int); off_t _kvm_pt_find(kvm_t *, uint64_t, unsigned int); int _kvm_visit_cb(kvm_t *, kvm_walk_pages_cb_t *, void *, u_long, u_long, u_long, vm_prot_t, size_t, unsigned int); int _kvm_pmap_init(kvm_t *, uint32_t, off_t); void * _kvm_pmap_get(kvm_t *, u_long, size_t); void * _kvm_map_get(kvm_t *, u_long, unsigned int); Index: head/sys/arm/arm/minidump_machdep.c =================================================================== --- head/sys/arm/arm/minidump_machdep.c (revision 368306) +++ head/sys/arm/arm/minidump_machdep.c (revision 368307) @@ -1,339 +1,344 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Peter Wemm * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/i386/minidump_machdep.c,v 1.6 2008/08/17 23:27:27 */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include #include #include #include #include #include #include #ifdef SW_WATCHDOG #include #endif #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. */ static size_t fragsz; static void *dump_va; static uint64_t counter, progress; static int is_dumpable(vm_paddr_t pa) { int i; for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) return (1); } return (0); } #define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8) static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, dump_va, 0, fragsz); fragsz = 0; return (error); } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, i, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if (ptr != NULL && pa != 0) { printf("can't have both va and pa!\n"); return (EINVAL); } if (pa != 0) { if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if ((pa & PAGE_MASK) != 0) { printf("address not page aligned\n"); return (EINVAL); } } if (ptr != NULL) { /* Flush any pre-existing pa pages before a virtual dump. */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; counter += len; progress -= len; if (counter >> 22) { printf(" %lld", PG2MB(progress >> PAGE_SHIFT)); counter &= (1<<22) - 1; } #ifdef SW_WATCHDOG wdog_kern_pat(WD_LASTVAL); #endif if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { for (i = 0; i < len; i += PAGE_SIZE) dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT); fragsz += len; pa += len; sz -= len; if (fragsz == maxdumpsz) { error = blk_flush(di); if (error) return (error); } } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } /* A buffer for general use. Its size must be at least one page. */ -static char dumpbuf[PAGE_SIZE]; +static char dumpbuf[PAGE_SIZE] __aligned(sizeof(uint64_t)); CTASSERT(sizeof(dumpbuf) % sizeof(pt2_entry_t) == 0); int minidumpsys(struct dumperinfo *di) { struct minidumphdr mdhdr; - uint64_t dumpsize; + uint64_t dumpsize, *dump_avail_buf; uint32_t ptesize; uint32_t pa, prev_pa = 0, count = 0; vm_offset_t va; - int error; + int error, i; char *addr; /* * Flush caches. Note that in the SMP case this operates only on the * current CPU's L1 cache. Before we reach this point, code in either * the system shutdown or kernel debugger has called stop_cpus() to stop * all cores other than this one.
Part of the ARM handling of * stop_cpus() is to call wbinv_all() on that core's local L1 cache. So * by the time we get here, all that remains is to flush the L1 for the * current CPU, then the L2. */ dcache_wbinv_poc_all(); counter = 0; /* Walk page table pages, set bits in vm_page_dump */ ptesize = 0; for (va = KERNBASE; va < kernel_vm_end; va += PAGE_SIZE) { pa = pmap_dump_kextract(va, NULL); if (pa != 0 && is_dumpable(pa)) dump_add_page(pa); ptesize += sizeof(pt2_entry_t); } /* Calculate dump size. */ dumpsize = ptesize; dumpsize += round_page(msgbufp->msg_size); - dumpsize += round_page(sizeof(dump_avail)); + dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); } dumpsize += PAGE_SIZE; progress = dumpsize; /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = msgbufp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.ptesize = ptesize; mdhdr.kernbase = KERNBASE; mdhdr.arch = __ARM_ARCH; mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V6; - mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); + mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t)); + dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Physical memory: %u MB\n", ptoa((uintmax_t)physmem) / 1048576); printf("Dumping %llu MB:", (long long)dumpsize >> 20); /* Dump my header */ bzero(dumpbuf, sizeof(dumpbuf)); bcopy(&mdhdr, dumpbuf, sizeof(mdhdr)); error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size)); if (error) goto fail; - /* Dump dump_avail */ _Static_assert(sizeof(dump_avail) <= sizeof(dumpbuf), + /* Dump dump_avail. Make a copy using 64-bit physical addresses.
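+ * On arm, vm_paddr_t may be narrower than 64 bits, while libkvm now
+ * always parses dump_avail[] as an array of uint64_t entries (see
+ * dump_avail_n() in kvm_private.c above), so each entry is widened
+ * before being written out.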
*/ + _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <= sizeof(dumpbuf), "Large dump_avail not handled"); bzero(dumpbuf, sizeof(dumpbuf)); - memcpy(dumpbuf, dump_avail, sizeof(dump_avail)); + dump_avail_buf = (uint64_t *)dumpbuf; + for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { + dump_avail_buf[i] = dump_avail[i]; + dump_avail_buf[i + 1] = dump_avail[i + 1]; + } error = blk_write(di, dumpbuf, 0, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ addr = dumpbuf; for (va = KERNBASE; va < kernel_vm_end; va += PAGE_SIZE) { pmap_dump_kextract(va, (pt2_entry_t *)addr); addr += sizeof(pt2_entry_t); if (addr == dumpbuf + sizeof(dumpbuf)) { error = blk_write(di, dumpbuf, 0, sizeof(dumpbuf)); if (error != 0) goto fail; addr = dumpbuf; } } if (addr != dumpbuf) { error = blk_write(di, dumpbuf, 0, addr - dumpbuf); if (error != 0) goto fail; } /* Dump memory chunks */ VM_PAGE_DUMP_FOREACH(pa) { if (!count) { prev_pa = pa; count++; } else { if (pa == (prev_pa + count * PAGE_SIZE)) count++; else { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pa = pa; } } } if (count) { error = blk_write(di, NULL, prev_pa, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pa = 0; } error = blk_flush(di); if (error) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } Index: head/sys/kern/kern_dump.c =================================================================== --- head/sys/kern/kern_dump.c (revision 368306) +++ head/sys/kern/kern_dump.c (revision 368307) @@ -1,387 +1,387 @@ /*- * Copyright (c) 2002 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); #define MD_ALIGN(x) roundup2((off_t)(x), PAGE_SIZE) /* Handle buffered writes. */ static size_t fragsz; struct dump_pa dump_map[DUMPSYS_MD_PA_NPAIRS]; #if !defined(__powerpc__) void dumpsys_gen_pa_init(void) { int n, idx; bzero(dump_map, sizeof(dump_map)); for (n = 0; n < nitems(dump_map); n++) { idx = n * 2; if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0) break; dump_map[n].pa_start = dump_avail[idx]; dump_map[n].pa_size = dump_avail[idx + 1] - dump_avail[idx]; } } #endif struct dump_pa * dumpsys_gen_pa_next(struct dump_pa *mdp) { if (mdp == NULL) return (&dump_map[0]); mdp++; if (mdp->pa_size == 0) mdp = NULL; return (mdp); } void dumpsys_gen_wbinv_all(void) { } void dumpsys_gen_unmap_chunk(vm_paddr_t pa __unused, size_t chunk __unused, void *va __unused) { } int dumpsys_gen_write_aux_headers(struct dumperinfo *di) { return (0); } int dumpsys_buf_seek(struct dumperinfo *di, size_t sz) { static uint8_t buf[DEV_BSIZE]; size_t nbytes; int error; bzero(buf, sizeof(buf)); while (sz > 0) { nbytes = MIN(sz, sizeof(buf)); error = dump_append(di, buf, 0, nbytes); if (error) return (error); sz -= nbytes; } return (0); } int dumpsys_buf_write(struct dumperinfo *di, char *ptr, size_t sz) { size_t len; int error; while (sz) { len = di->blocksize - fragsz; if (len > sz) len = sz; memcpy((char *)di->blockbuf + fragsz, ptr, len); fragsz += len; ptr += len; sz -= len; if (fragsz == di->blocksize) { error = dump_append(di, di->blockbuf, 0, di->blocksize); if (error) return (error); fragsz = 0; } } return (0); } int dumpsys_buf_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_append(di, di->blockbuf, 0, di->blocksize); fragsz = 0; return (error); } CTASSERT(PAGE_SHIFT < 20); #define PG2MB(pgs) ((pgs + (1 << (20 - PAGE_SHIFT)) - 1) >> (20 - PAGE_SHIFT)) int dumpsys_cb_dumpdata(struct dump_pa *mdp, int seqnr, void *arg) { struct dumperinfo *di = (struct dumperinfo*)arg; vm_paddr_t pa; void *va; uint64_t pgs; size_t counter, sz, chunk; int c, error; u_int maxdumppgs; error = 0; /* catch case in which chunk size is 0 */ counter = 0; /* Update twiddle every 16MB */ va = NULL; pgs = mdp->pa_size / PAGE_SIZE; pa = mdp->pa_start; maxdumppgs = min(di->maxiosize / PAGE_SIZE, MAXDUMPPGS); if (maxdumppgs == 0) /* seatbelt */ maxdumppgs = 1; printf(" chunk %d: %juMB (%ju pages)", seqnr, (uintmax_t)PG2MB(pgs), (uintmax_t)pgs); dumpsys_wbinv_all(); while (pgs) { chunk = pgs; if (chunk > maxdumppgs) chunk = maxdumppgs; sz = chunk << PAGE_SHIFT; counter += sz; if (counter >> 24) { printf(" %ju", (uintmax_t)PG2MB(pgs)); counter &= (1 << 24) - 1; } dumpsys_map_chunk(pa, chunk, &va); wdog_kern_pat(WD_LASTVAL); error = dump_append(di, va, 0, sz); dumpsys_unmap_chunk(pa, chunk, va); if (error) break; pgs -= chunk; pa += sz; /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } printf(" ... %s\n", (error) ? 
"fail" : "ok"); return (error); } int dumpsys_foreach_chunk(dumpsys_callback_t cb, void *arg) { struct dump_pa *mdp; int error, seqnr; seqnr = 0; mdp = dumpsys_pa_next(NULL); while (mdp != NULL) { error = (*cb)(mdp, seqnr++, arg); if (error) return (-error); mdp = dumpsys_pa_next(mdp); } return (seqnr); } static off_t fileofs; static int cb_dumphdr(struct dump_pa *mdp, int seqnr, void *arg) { struct dumperinfo *di = (struct dumperinfo*)arg; Elf_Phdr phdr; uint64_t size; int error; size = mdp->pa_size; bzero(&phdr, sizeof(phdr)); phdr.p_type = PT_LOAD; phdr.p_flags = PF_R; /* XXX */ phdr.p_offset = fileofs; #ifdef __powerpc__ phdr.p_vaddr = (do_minidump? mdp->pa_start : ~0L); phdr.p_paddr = (do_minidump? ~0L : mdp->pa_start); #else phdr.p_vaddr = mdp->pa_start; phdr.p_paddr = mdp->pa_start; #endif phdr.p_filesz = size; phdr.p_memsz = size; phdr.p_align = PAGE_SIZE; error = dumpsys_buf_write(di, (char*)&phdr, sizeof(phdr)); fileofs += phdr.p_filesz; return (error); } static int cb_size(struct dump_pa *mdp, int seqnr, void *arg) { uint64_t *sz; sz = (uint64_t *)arg; *sz += (uint64_t)mdp->pa_size; return (0); } int dumpsys_generic(struct dumperinfo *di) { static struct kerneldumpheader kdh; Elf_Ehdr ehdr; uint64_t dumpsize; off_t hdrgap; size_t hdrsz; int error; -#if !defined(__powerpc__) || defined(__powerpc64__) +#if MINIDUMP_PAGE_TRACKING == 1 if (do_minidump) return (minidumpsys(di)); #endif bzero(&ehdr, sizeof(ehdr)); ehdr.e_ident[EI_MAG0] = ELFMAG0; ehdr.e_ident[EI_MAG1] = ELFMAG1; ehdr.e_ident[EI_MAG2] = ELFMAG2; ehdr.e_ident[EI_MAG3] = ELFMAG3; ehdr.e_ident[EI_CLASS] = ELF_CLASS; #if BYTE_ORDER == LITTLE_ENDIAN ehdr.e_ident[EI_DATA] = ELFDATA2LSB; #else ehdr.e_ident[EI_DATA] = ELFDATA2MSB; #endif ehdr.e_ident[EI_VERSION] = EV_CURRENT; ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */ ehdr.e_type = ET_CORE; ehdr.e_machine = EM_VALUE; ehdr.e_phoff = sizeof(ehdr); ehdr.e_flags = 0; ehdr.e_ehsize = sizeof(ehdr); ehdr.e_phentsize = sizeof(Elf_Phdr); ehdr.e_shentsize = sizeof(Elf_Shdr); dumpsys_pa_init(); /* Calculate dump size. */ dumpsize = 0L; ehdr.e_phnum = dumpsys_foreach_chunk(cb_size, &dumpsize) + DUMPSYS_NUM_AUX_HDRS; hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize; fileofs = MD_ALIGN(hdrsz); dumpsize += fileofs; hdrgap = fileofs - roundup2((off_t)hdrsz, di->blocksize); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARCH_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Dumping %ju MB (%d chunks)\n", (uintmax_t)dumpsize >> 20, ehdr.e_phnum - DUMPSYS_NUM_AUX_HDRS); /* Dump ELF header */ error = dumpsys_buf_write(di, (char*)&ehdr, sizeof(ehdr)); if (error) goto fail; /* Dump program headers */ error = dumpsys_foreach_chunk(cb_dumphdr, di); if (error < 0) goto fail; error = dumpsys_write_aux_headers(di); if (error < 0) goto fail; dumpsys_buf_flush(di); /* * All headers are written using blocked I/O, so we know the * current offset is (still) block aligned. Skip the alignement * in the file to have the segment contents aligned at page * boundary. */ error = dumpsys_buf_seek(di, (size_t)hdrgap); if (error) goto fail; /* Dump memory chunks. */ error = dumpsys_foreach_chunk(dumpsys_cb_dumpdata, di); if (error < 0) goto fail; error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) printf("\nDump failed. 
Partition too small.\n"); else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } Index: head/sys/mips/mips/minidump_machdep.c =================================================================== --- head/sys/mips/mips/minidump_machdep.c (revision 368306) +++ head/sys/mips/mips/minidump_machdep.c (revision 368307) @@ -1,327 +1,336 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/arm/arm/minidump_machdep.c v214223 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); static struct kerneldumpheader kdh; /* Handle chunked writes. 
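 *
 * "counter" below accumulates bytes written; the "counter >> 22" check
 * in write_buffer() throttles progress reporting to once every 4 MB
 * (1 << 22 bytes).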
*/ static uint64_t counter, progress, dumpsize; /* Just an auxiliary buffer */ -static char tmpbuffer[PAGE_SIZE]; +static char tmpbuffer[PAGE_SIZE] __aligned(sizeof(uint64_t)); extern pd_entry_t *kernel_segmap; static int is_dumpable(vm_paddr_t pa) { vm_page_t m; int i; if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL) return ((m->flags & PG_NODUMP) == 0); for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) return (1); } return (0); } static struct { int min_per; int max_per; int visited; } progress_track[10] = { { 0, 10, 0}, { 10, 20, 0}, { 20, 30, 0}, { 30, 40, 0}, { 40, 50, 0}, { 50, 60, 0}, { 60, 70, 0}, { 70, 80, 0}, { 80, 90, 0}, { 90, 100, 0} }; static void report_progress(uint64_t progress, uint64_t dumpsize) { int sofar, i; sofar = 100 - ((progress * 100) / dumpsize); for (i = 0; i < nitems(progress_track); i++) { if (sofar < progress_track[i].min_per || sofar > progress_track[i].max_per) continue; if (progress_track[i].visited) return; progress_track[i].visited = 1; printf("..%d%%", sofar); return; } } static int write_buffer(struct dumperinfo *di, char *ptr, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = di->maxiosize; if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; while (sz) { len = min(maxdumpsz, sz); counter += len; progress -= len; if (counter >> 22) { report_progress(progress, dumpsize); counter &= (1<<22) - 1; } wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_append(di, ptr, 0, len); if (error) return (error); ptr += len; sz -= len; } else { panic("pa is not supported"); } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } int minidumpsys(struct dumperinfo *di) { struct minidumphdr mdhdr; + uint64_t *dump_avail_buf; uint32_t ptesize; vm_paddr_t pa; vm_offset_t prev_pte = 0; uint32_t count = 0; vm_offset_t va; pt_entry_t *pte; int i, error; void *dump_va; /* Flush cache */ mips_dcache_wbinv_all(); counter = 0; /* Walk page table pages, set bits in vm_page_dump */ ptesize = 0; for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) { ptesize += PAGE_SIZE; pte = pmap_pte(kernel_pmap, va); KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va)); for (i = 0; i < NPTEPG; i++) { if (pte_test(&pte[i], PTE_V)) { pa = TLBLO_PTE_TO_PA(pte[i]); if (is_dumpable(pa)) dump_add_page(pa); } } } /* * Now mark pages from 0 to phys_avail[0], that's where the kernel * and pages allocated by pmap_steal reside */ for (pa = 0; pa < phys_avail[0]; pa += PAGE_SIZE) { if (is_dumpable(pa)) dump_add_page(pa); } /* Calculate dump size.
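 *
 * The dump is written in this order: one page of header, the message
 * buffer, dump_avail[], the page bitmap, "ptesize" bytes of page table
 * pages, and finally one copy of every page left in the bitmap.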
*/ dumpsize = ptesize; dumpsize += round_page(msgbufp->msg_size); - dumpsize += round_page(sizeof(dump_avail)); + dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t)); dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); VM_PAGE_DUMP_FOREACH(pa) { /* Clear out undumpable pages now if needed */ if (is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); } dumpsize += PAGE_SIZE; progress = dumpsize; /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = msgbufp->msg_size; mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages)); mdhdr.ptesize = ptesize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; - mdhdr.dumpavailsize = round_page(sizeof(dump_avail)); + mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t)); dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION, dumpsize); error = dump_start(di, &kdh); if (error != 0) goto fail; printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20, ptoa((uintmax_t)physmem) / 1048576); /* Dump my header */ bzero(tmpbuffer, sizeof(tmpbuffer)); bcopy(&mdhdr, tmpbuffer, sizeof(mdhdr)); error = write_buffer(di, tmpbuffer, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = write_buffer(di, (char *)msgbufp->msg_ptr, round_page(msgbufp->msg_size)); if (error) goto fail; - /* Dump dump_avail */ - _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer), - "Large dump_avail not handled"); + /* Dump dump_avail. Make a copy using 64-bit physical addresses. */ + _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <= + sizeof(tmpbuffer), "Large dump_avail not handled"); bzero(tmpbuffer, sizeof(tmpbuffer)); - memcpy(tmpbuffer, dump_avail, sizeof(dump_avail)); + if (sizeof(dump_avail[0]) != sizeof(uint64_t)) { + dump_avail_buf = (uint64_t *)tmpbuffer; + for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { + dump_avail_buf[i] = dump_avail[i]; + dump_avail_buf[i + 1] = dump_avail[i + 1]; + } + } else { + memcpy(tmpbuffer, dump_avail, sizeof(dump_avail)); + } error = write_buffer(di, tmpbuffer, PAGE_SIZE); if (error) goto fail; /* Dump bitmap */ error = write_buffer(di, (char *)vm_page_dump, round_page(BITSET_SIZE(vm_page_dump_pages))); if (error) goto fail; /* Dump kernel page table pages */ for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) { pte = pmap_pte(kernel_pmap, va); KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va)); if (!count) { prev_pte = (vm_offset_t)pte; count++; } else { if ((vm_offset_t)pte == (prev_pte + count * PAGE_SIZE)) count++; else { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pte = (vm_offset_t)pte; } } } if (count) { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pte = 0; } /* Dump memory chunks page by page */ VM_PAGE_DUMP_FOREACH(pa) { dump_va = pmap_kenter_temporary(pa, 0); error = write_buffer(di, dump_va, PAGE_SIZE); if (error) goto fail; pmap_kenter_temporary_free(pa); } error = dump_finish(di, &kdh); if (error != 0) goto fail; printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == E2BIG || error == ENOSPC) { printf("\nDump failed. Partition too small (about %lluMB were " "needed this time).\n", (long long)dumpsize >> 20); } else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); }
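For illustration only, a minimal stand-alone sketch of the dump_avail[] widening that the kernel-side hunks above perform; the function name and the sample addresses below are hypothetical, not part of this change:

/*
 * Sketch: copy zero-terminated {start, end} pairs held in the native
 * vm_paddr_t width into the fixed 64-bit layout libkvm now expects.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t vm_paddr_t;	/* assume a 32-bit physical address */

static void
widen_dump_avail(const vm_paddr_t *avail, uint64_t *buf)
{
	int i;

	for (i = 0; avail[i] != 0 || avail[i + 1] != 0; i += 2) {
		buf[i] = avail[i];
		buf[i + 1] = avail[i + 1];
	}
	buf[i] = buf[i + 1] = 0;	/* keep the zero terminator */
}

int
main(void)
{
	vm_paddr_t avail[] = { 0x1000, 0x9000000, 0xa000000, 0xf000000, 0, 0 };
	uint64_t buf[6];

	widen_dump_avail(avail, buf);
	for (int i = 0; buf[i] != 0 || buf[i + 1] != 0; i += 2)
		printf("0x%jx-0x%jx\n", (uintmax_t)buf[i],
		    (uintmax_t)buf[i + 1]);
	return (0);
}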