Index: head/lib/libkvm/kvm.h
===================================================================
--- head/lib/libkvm/kvm.h	(revision 325731)
+++ head/lib/libkvm/kvm.h	(revision 325732)
@@ -1,125 +1,132 @@
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kvm.h	8.1 (Berkeley) 6/2/93
 * $FreeBSD$
 */

#ifndef _KVM_H_
#define	_KVM_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <nlist.h>
-#include <vm/vm.h>
+
+/*
+ * Including vm/vm.h causes namespace pollution issues.  For the
+ * most part, only things using kvm_walk_pages() need to #include it.
+ */
+#ifndef VM_H
+typedef u_char vm_prot_t;
+#endif

/* Default version symbol. */
#define	VRS_SYM		"_version"
#define	VRS_KEY		"VERSION"

#ifndef _SIZE_T_DECLARED
typedef	__size_t	size_t;
#define	_SIZE_T_DECLARED
#endif

#ifndef _SSIZE_T_DECLARED
typedef	__ssize_t	ssize_t;
#define	_SSIZE_T_DECLARED
#endif

typedef uint64_t kvaddr_t;	/* An address in a target image. */

struct kvm_nlist {
	const char *n_name;
	unsigned char n_type;
	kvaddr_t n_value;
};

typedef struct __kvm kvm_t;

struct kinfo_proc;
struct proc;

struct kvm_swap {
	char	ksw_devname[32];
	u_int	ksw_used;
	u_int	ksw_total;
	int	ksw_flags;
	u_int	ksw_reserved1;
	u_int	ksw_reserved2;
};

struct kvm_page {
	unsigned int version;
	u_long paddr;
	u_long kmap_vaddr;
	u_long dmap_vaddr;
	vm_prot_t prot;
	u_long offset;
	size_t len;
	/* end of version 1 */
};

#define	SWIF_DEV_PREFIX	0x0002
#define	LIBKVM_WALK_PAGES_VERSION	1

__BEGIN_DECLS
int	  kvm_close(kvm_t *);
int	  kvm_dpcpu_setcpu(kvm_t *, unsigned int);
char	**kvm_getargv(kvm_t *, const struct kinfo_proc *, int);
int	  kvm_getcptime(kvm_t *, long *);
char	**kvm_getenvv(kvm_t *, const struct kinfo_proc *, int);
char	 *kvm_geterr(kvm_t *);
int	  kvm_getloadavg(kvm_t *, double [], int);
int	  kvm_getmaxcpu(kvm_t *);
int	  kvm_getncpus(kvm_t *);
void	 *kvm_getpcpu(kvm_t *, int);
uint64_t  kvm_counter_u64_fetch(kvm_t *, u_long);
struct kinfo_proc *
	  kvm_getprocs(kvm_t *, int, int, int *);
int	  kvm_getswapinfo(kvm_t *, struct kvm_swap *, int, int);
int	  kvm_native(kvm_t *);
int	  kvm_nlist(kvm_t *, struct nlist *);
int	  kvm_nlist2(kvm_t *, struct kvm_nlist *);
kvm_t	 *kvm_open
	    (const char *, const char *, const char *, int, const char *);
kvm_t	 *kvm_openfiles
	    (const char *, const char *, const char *, int, char *);
kvm_t	 *kvm_open2
	    (const char *, const char *, int, char *,
	    int (*)(const char *, kvaddr_t *));
ssize_t	  kvm_read(kvm_t *, unsigned long, void *, size_t);
ssize_t	  kvm_read_zpcpu(kvm_t *, unsigned long, void *, size_t, int);
ssize_t	  kvm_read2(kvm_t *, kvaddr_t, void *, size_t);
ssize_t	  kvm_write(kvm_t *, unsigned long, const void *, size_t);

typedef int kvm_walk_pages_cb_t(struct kvm_page *, void *);
int kvm_walk_pages(kvm_t *, kvm_walk_pages_cb_t *, void *);
__END_DECLS

#endif /* !_KVM_H_ */
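[Editor's note: with the new VM_H guard, most kvm.h consumers no longer see the real vm/vm.h definitions; a program that wants to interpret kvm_page.prot from kvm_walk_pages() should include <vm/vm.h> itself, before kvm.h. A minimal sketch of such a consumer follows; it is illustrative only and not part of the commit, and the callback name and dump path are made up.]

/*
 * Hedged example (not from the diff): walk the pages of a crash dump.
 * <vm/vm.h> is included first so kvm.h sees VM_H defined and does not
 * supply its own fallback vm_prot_t typedef.
 */
#include <sys/types.h>
#include <vm/vm.h>		/* real vm_prot_t and VM_PROT_* flags */
#include <kvm.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>

static int
print_page(struct kvm_page *p, void *arg __unused)
{
	printf("pa %#lx va %#lx prot %c%c%c len %zu\n",
	    p->paddr, p->kmap_vaddr,
	    (p->prot & VM_PROT_READ) ? 'r' : '-',
	    (p->prot & VM_PROT_WRITE) ? 'w' : '-',
	    (p->prot & VM_PROT_EXECUTE) ? 'x' : '-',
	    p->len);
	return (1);		/* non-zero: keep walking */
}

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	/* argv[1] is assumed to name a minidump, e.g. /var/crash/vmcore.0 */
	kd = kvm_openfiles(NULL, argc > 1 ? argv[1] : "/dev/mem", NULL,
	    O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	if (kvm_walk_pages(kd, print_page, NULL) == 0)
		fprintf(stderr, "kvm_walk_pages: %s\n", kvm_geterr(kd));
	return (kvm_close(kd));
}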
Index: head/lib/libkvm/kvm_aarch64.h
===================================================================
--- head/lib/libkvm/kvm_aarch64.h	(revision 325731)
+++ head/lib/libkvm/kvm_aarch64.h	(revision 325732)
@@ -1,67 +1,67 @@
/*-
 * Copyright (c) 2015 John H. Baldwin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __KVM_AARCH64_H__
#define	__KVM_AARCH64_H__

#ifdef __aarch64__
#include <machine/pte.h>
#endif

typedef uint64_t	aarch64_physaddr_t;
typedef uint64_t	aarch64_pte_t;

#define	AARCH64_PAGE_SHIFT	12
#define	AARCH64_PAGE_SIZE	(1 << AARCH64_PAGE_SHIFT)
#define	AARCH64_PAGE_MASK	(AARCH64_PAGE_SIZE - 1)

/* Source: arm64/include/pte.h */
#define	AARCH64_ATTR_MASK	0xfff0000000000fff
-#define	AARCH64_ATTR_UXN	(1UL << 54)
-#define	AARCH64_ATTR_PXN	(1UL << 53)
+#define	AARCH64_ATTR_UXN	(1ULL << 54)
+#define	AARCH64_ATTR_PXN	(1ULL << 53)
#define	AARCH64_ATTR_XN		(AARCH64_ATTR_PXN | AARCH64_ATTR_UXN)
#define	AARCH64_ATTR_AP(x)	((x) << 6)
#define	AARCH64_ATTR_AP_RO	(1 << 1)

#define	AARCH64_ATTR_DESCR_MASK	3

#define	AARCH64_L3_SHIFT	12
#define	AARCH64_L3_PAGE		0x3

#ifdef __aarch64__
_Static_assert(PAGE_SHIFT == AARCH64_PAGE_SHIFT, "PAGE_SHIFT mismatch");
_Static_assert(PAGE_SIZE == AARCH64_PAGE_SIZE, "PAGE_SIZE mismatch");
_Static_assert(PAGE_MASK == AARCH64_PAGE_MASK, "PAGE_MASK mismatch");
_Static_assert(ATTR_MASK == AARCH64_ATTR_MASK, "ATTR_MASK mismatch");
_Static_assert(ATTR_DESCR_MASK == AARCH64_ATTR_DESCR_MASK,
    "ATTR_DESCR_MASK mismatch");
_Static_assert(L3_SHIFT == AARCH64_L3_SHIFT, "L3_SHIFT mismatch");
_Static_assert(L3_PAGE == AARCH64_L3_PAGE, "L3_PAGE mismatch");
#endif

#endif /* !__KVM_AARCH64_H__ */
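[Editor's note: the 1UL→1ULL change matters when libkvm is built for a 32-bit host to inspect an aarch64 dump: with an ILP32 `unsigned long`, `1UL << 54` shifts past the width of the type and is undefined behavior, while 1ULL guarantees at least 64 bits. A standalone sketch (not part of the diff) illustrating the constants:]

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * On an ILP32 host (32-bit long), (1UL << 54) is undefined;
	 * with 1ULL the constant is at least 64 bits wide and bit 54
	 * is representable.
	 */
	uint64_t uxn = 1ULL << 54;	/* AARCH64_ATTR_UXN */
	uint64_t pxn = 1ULL << 53;	/* AARCH64_ATTR_PXN */

	printf("UXN %#jx PXN %#jx\n", (uintmax_t)uxn, (uintmax_t)pxn);
	return (0);
}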
Index: head/lib/libkvm/kvm_amd64.c
===================================================================
--- head/lib/libkvm/kvm_amd64.c	(revision 325731)
+++ head/lib/libkvm/kvm_amd64.c	(revision 325732)
@@ -1,337 +1,338 @@
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * AMD64 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <vm/vm.h>
#include <kvm.h>

#include <limits.h>

#include "kvm_private.h"
#include "kvm_amd64.h"

struct vmstate {
	size_t		phnum;
	GElf_Phdr	*phdr;
	amd64_pml4e_t	*PML4;
};

/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 */
static size_t
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs)
{
	struct vmstate *vm = kd->vmst;
	GElf_Phdr *p;
	size_t n;

	if (kd->rawdump) {
		*ofs = pa;
		return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
	}

	p = vm->phdr;
	n = vm->phnum;
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
		p++, n--;
	if (n == 0)
		return (0);
	*ofs = (pa - p->p_paddr) + p->p_offset;
	return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
}

static void
_amd64_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	if (vm->PML4)
		free(vm->PML4);
	free(vm->phdr);
	free(vm);
	kd->vmst = NULL;
}

static int
_amd64_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
	    !_kvm_is_minidump(kd));
}

static int
_amd64_initvtop(kvm_t *kd)
{
	struct kvm_nlist nl[2];
	amd64_physaddr_t pa;
	kvaddr_t kernbase;
	amd64_pml4e_t *PML4;

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst->PML4 = 0;

	if (kd->rawdump == 0) {
		if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum,
		    &kd->vmst->phdr) == -1)
			return (-1);
	}

	nl[0].n_name = "kernbase";
	nl[1].n_name = 0;

	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no kernbase");
		return (-1);
	}
	kernbase = nl[0].n_value;

	nl[0].n_name = "KPML4phys";
	nl[1].n_name = 0;

	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist - no KPML4phys");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) !=
	    sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		return (-1);
	}
	pa = le64toh(pa);
	PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE);
	if (PML4 == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate PML4");
		return (-1);
	}
	if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read KPML4phys");
		free(PML4);
		return (-1);
	}
	kd->vmst->PML4 = PML4;
	return (0);
}

static int
_amd64_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_physaddr_t pdpe_pa;
	amd64_physaddr_t pde_pa;
	amd64_physaddr_t pte_pa;
	amd64_pml4e_t pml4e;
	amd64_pdpe_t pdpe;
	amd64_pde_t pde;
	amd64_pte_t pte;
	kvaddr_t pml4eindex;
	kvaddr_t pdpeindex;
	kvaddr_t pdeindex;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;
	size_t s;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PML4 == NULL) {
		s = _kvm_pa2off(kd, va, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: bootstrap data not in dump");
			goto invalid;
		} else
			return (AMD64_PAGE_SIZE - offset);
	}

	pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1);
	pml4e = le64toh(vm->PML4[pml4eindex]);
	if ((pml4e & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid");
		goto invalid;
	}

	pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1);
	pdpe_pa = (pml4e & AMD64_PG_FRAME) +
	    (pdpeindex * sizeof(amd64_pdpe_t));

	s = _kvm_pa2off(kd, pdpe_pa, &ofs);
	if (s < sizeof(pdpe)) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe");
		goto invalid;
	}
	pdpe = le64toh(pdpe);
	if ((pdpe & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid");
		goto invalid;
	}

	if (pdpe & AMD64_PG_PS) {
		/*
		 * No next-level page table; pdpe describes one 1GB page.
		 */
		a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: 1GB page address not in dump");
			goto invalid;
		} else
			return (AMD64_NBPDP - (va & AMD64_PDPMASK));
	}

	pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1);
	pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t));

	s = _kvm_pa2off(kd, pde_pa, &ofs);
	if (s < sizeof(pde)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read pde");
		goto invalid;
	}
	pde = le64toh(pde);
	if ((pde & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pde not valid");
		goto invalid;
	}

	if (pde & AMD64_PG_PS) {
		/*
		 * No final-level page table; pde describes one 2MB page.
		 */
		a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK);
		s = _kvm_pa2off(kd, a, pa);
		if (s == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_vatop: 2MB page address not in dump");
			goto invalid;
		} else
			return (AMD64_NBPDR - (va & AMD64_PDRMASK));
	}

	pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1);
	pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t));

	s = _kvm_pa2off(kd, pte_pa, &ofs);
	if (s < sizeof(pte)) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found");
		goto invalid;
	}
	if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_amd64_vatop: read");
		goto invalid;
	}
	if ((pte & AMD64_PG_V) == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: pte not valid");
		goto invalid;
	}

	a = (pte & AMD64_PG_FRAME) + offset;
	s = _kvm_pa2off(kd, a, pa);
	if (s == 0) {
		_kvm_err(kd, kd->program, "_amd64_vatop: address not in dump");
		goto invalid;
	} else
		return (AMD64_PAGE_SIZE - offset);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "kvm_kvatop called in live kernel!");
		return (0);
	}
	return (_amd64_vatop(kd, va, pa));
}

int
_amd64_native(kvm_t *kd __unused)
{

#ifdef __amd64__
	return (1);
#else
	return (0);
#endif
}

static struct kvm_arch kvm_amd64 = {
	.ka_probe = _amd64_probe,
	.ka_initvtop = _amd64_initvtop,
	.ka_freevtop = _amd64_freevtop,
	.ka_kvatop = _amd64_kvatop,
	.ka_native = _amd64_native,
};

KVM_ARCH(kvm_amd64);
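[Editor's note: _amd64_vatop() mirrors the hardware 4-level walk; each level's index is a 9-bit slice of the virtual address, taken at shifts 39/30/21/12. A standalone sketch of that index math (constants restated from kvm_amd64.h, example VA is arbitrary; not part of the diff):]

#include <stdint.h>
#include <stdio.h>

#define	PML4SHIFT	39
#define	PDPSHIFT	30
#define	PDRSHIFT	21
#define	PAGE_SHIFT	12
#define	NMASK		0x1ff	/* 512 entries per level */

int
main(void)
{
	uint64_t va = 0xffffffff80201234ULL;	/* arbitrary kernel VA */

	printf("pml4e %ju pdpe %ju pde %ju pte %ju offset %#jx\n",
	    (uintmax_t)((va >> PML4SHIFT) & NMASK),
	    (uintmax_t)((va >> PDPSHIFT) & NMASK),
	    (uintmax_t)((va >> PDRSHIFT) & NMASK),
	    (uintmax_t)((va >> PAGE_SHIFT) & NMASK),
	    (uintmax_t)(va & 0xfff));
	return (0);
}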
Index: head/lib/libkvm/kvm_amd64.h
===================================================================
--- head/lib/libkvm/kvm_amd64.h	(revision 325731)
+++ head/lib/libkvm/kvm_amd64.h	(revision 325732)
@@ -1,88 +1,90 @@
/*-
 * Copyright (c) 2015 John H. Baldwin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __KVM_AMD64_H__
#define	__KVM_AMD64_H__

#ifdef __amd64__
#include <machine/param.h>
#include <machine/pmap.h>
#endif

typedef uint64_t	amd64_physaddr_t;
typedef uint64_t	amd64_pte_t;
typedef uint64_t	amd64_pde_t;
typedef uint64_t	amd64_pdpe_t;
typedef uint64_t	amd64_pml4e_t;

#define	AMD64_NPTEPG		(AMD64_PAGE_SIZE / sizeof(amd64_pte_t))
#define	AMD64_PAGE_SHIFT	12
#define	AMD64_PAGE_SIZE		(1 << AMD64_PAGE_SHIFT)
#define	AMD64_PAGE_MASK		(AMD64_PAGE_SIZE - 1)
#define	AMD64_NPDEPG		(AMD64_PAGE_SIZE / sizeof(amd64_pde_t))
#define	AMD64_PDRSHIFT		21
#define	AMD64_NBPDR		(1 << AMD64_PDRSHIFT)
#define	AMD64_PDRMASK		(AMD64_NBPDR - 1)
#define	AMD64_NPDPEPG		(AMD64_PAGE_SIZE / sizeof(amd64_pdpe_t))
#define	AMD64_PDPSHIFT		30
#define	AMD64_NBPDP		(1 << AMD64_PDPSHIFT)
#define	AMD64_PDPMASK		(AMD64_NBPDP - 1)
#define	AMD64_NPML4EPG		(AMD64_PAGE_SIZE / sizeof(amd64_pml4e_t))
#define	AMD64_PML4SHIFT		39

+#define	AMD64_PG_NX		(1ULL << 63)
#define	AMD64_PG_V		0x001
+#define	AMD64_PG_RW		0x002
#define	AMD64_PG_PS		0x080
#define	AMD64_PG_FRAME		(0x000ffffffffff000)
#define	AMD64_PG_PS_FRAME	(0x000fffffffe00000)
#define	AMD64_PG_1GB_FRAME	(0x000fffffc0000000)

#ifdef __amd64__
_Static_assert(NPTEPG == AMD64_NPTEPG, "NPTEPG mismatch");
_Static_assert(PAGE_SHIFT == AMD64_PAGE_SHIFT, "PAGE_SHIFT mismatch");
_Static_assert(PAGE_SIZE == AMD64_PAGE_SIZE, "PAGE_SIZE mismatch");
_Static_assert(PAGE_MASK == AMD64_PAGE_MASK, "PAGE_MASK mismatch");
_Static_assert(NPDEPG == AMD64_NPDEPG, "NPDEPG mismatch");
_Static_assert(PDRSHIFT == AMD64_PDRSHIFT, "PDRSHIFT mismatch");
_Static_assert(NBPDR == AMD64_NBPDR, "NBPDR mismatch");
_Static_assert(PDRMASK == AMD64_PDRMASK, "PDRMASK mismatch");
_Static_assert(NPDPEPG == AMD64_NPDPEPG, "NPDPEPG mismatch");
_Static_assert(PDPSHIFT == AMD64_PDPSHIFT, "PDPSHIFT mismatch");
_Static_assert(NBPDP == AMD64_NBPDP, "NBPDP mismatch");
_Static_assert(PDPMASK == AMD64_PDPMASK, "PDPMASK mismatch");
_Static_assert(NPML4EPG == AMD64_NPML4EPG, "NPML4EPG mismatch");
_Static_assert(PML4SHIFT == AMD64_PML4SHIFT, "PML4SHIFT mismatch");
_Static_assert(PG_V == AMD64_PG_V, "PG_V mismatch");
_Static_assert(PG_PS == AMD64_PG_PS, "PG_PS mismatch");
_Static_assert(PG_FRAME == AMD64_PG_FRAME, "PG_FRAME mismatch");
_Static_assert(PG_PS_FRAME == AMD64_PG_PS_FRAME, "PG_PS_FRAME mismatch");
#endif

int	_amd64_native(kvm_t *);

#endif /* !__KVM_AMD64_H__ */
Index: head/lib/libkvm/kvm_minidump_aarch64.c
===================================================================
--- head/lib/libkvm/kvm_minidump_aarch64.c	(revision 325731)
+++ head/lib/libkvm/kvm_minidump_aarch64.c	(revision 325732)
@@ -1,286 +1,287 @@
/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM64 (AArch64) machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/arm64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_aarch64.h"

#define	aarch64_round_page(x)	roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE)

struct vmstate {
	struct minidumphdr hdr;
};

static aarch64_pte_t
_aarch64_pte_get(kvm_t *kd, u_long pteindex)
{
	aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

static int
_aarch64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) &&
	    _kvm_is_minidump(kd));
}

static void
_aarch64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);

	/* build physical address lookup table for sparse pages */
	sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
	    aarch64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize);

	return (0);
}

static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AARCH64_PAGE_MASK;

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
		    ~AARCH64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT;
		if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
			goto invalid;
		l3 = _aarch64_pte_get(kd, l3_index);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
		    "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_aarch64_minidump_kvatop called in live kernel!");
		return (0);
	}
	return (_aarch64_minidump_vatop(kd, va, pa));
}

static int
_aarch64_native(kvm_t *kd __unused)
{

#ifdef __aarch64__
	return (1);
#else
	return (0);
#endif
}

static vm_prot_t
_aarch64_entry_to_prot(aarch64_pte_t pte)
{
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm64/arm64/pmap.c:pmap_protect() */
	if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & AARCH64_ATTR_XN) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

static int
_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
	u_long bmindex, dva, pa, pteindex, va;
	struct kvm_bitmap bm;
	vm_prot_t prot;
	int ret = 0;

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);

		if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
			continue;

		va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT);
		pa = pte & ~AARCH64_ATTR_MASK;
		dva = vm->hdr.dmapbase + pa;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = bmindex * AARCH64_PAGE_SIZE;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE))
			break;
		va = 0;
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    prot, AARCH64_PAGE_SIZE, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_aarch64_minidump = {
	.ka_probe = _aarch64_minidump_probe,
	.ka_initvtop = _aarch64_minidump_initvtop,
	.ka_freevtop = _aarch64_minidump_freevtop,
	.ka_kvatop = _aarch64_minidump_kvatop,
	.ka_native = _aarch64_native,
	.ka_walk_pages = _aarch64_minidump_walk_pages,
};

KVM_ARCH(kvm_aarch64_minidump);
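[Editor's note: the offset arithmetic in the minidump initvtop routines encodes the on-disk layout: one page of header, then the message buffer, the page bitmap, the page-table pages, and finally the sparse page data, each rounded up to a page. A sketch of the same arithmetic with made-up header field values; not part of the diff:]

#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096UL
#define	round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	/* Hypothetical header fields, already byte-swapped to host order. */
	uint32_t msgbufsize = 65536, bitmapsize = 131072, pmapsize = 2097152;
	uint64_t off, sparse_off;

	off = PAGE_SIZE + round_page(msgbufsize);	/* skip header+msgbuf */
	sparse_off = off + round_page(bitmapsize) + round_page(pmapsize);
	printf("bitmap at %#jx, pmap at %#jx, page data at %#jx\n",
	    (uintmax_t)off, (uintmax_t)(off + round_page(bitmapsize)),
	    (uintmax_t)sparse_off);
	return (0);
}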
Index: head/lib/libkvm/kvm_minidump_amd64.c
===================================================================
--- head/lib/libkvm/kvm_minidump_amd64.c	(revision 325731)
+++ head/lib/libkvm/kvm_minidump_amd64.c	(revision 325732)
@@ -1,431 +1,432 @@
/*-
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AMD64 machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <vm/vm.h>
#include <kvm.h>

#include "../../sys/amd64/include/minidump.h"

#include <limits.h>

#include "kvm_private.h"
#include "kvm_amd64.h"

#define	amd64_round_page(x)	roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
#define	VM_IS_V1(vm)		(vm->hdr.version == 1)
#define	VA_OFF(vm, va)		\
	(VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK))

struct vmstate {
	struct minidumphdr hdr;
};

static vm_prot_t
_amd64_entry_to_prot(uint64_t entry)
{
	vm_prot_t prot = VM_PROT_READ;

-	if ((entry & PG_RW) != 0)
+	if ((entry & AMD64_PG_RW) != 0)
		prot |= VM_PROT_WRITE;
-	if ((entry & PG_NX) == 0)
+	if ((entry & AMD64_PG_NX) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

/*
 * Version 2 minidumps use page directory entries, while version 1 uses
 * page table entries.
 */
static amd64_pde_t
_amd64_pde_get(kvm_t *kd, u_long pdeindex)
{
	amd64_pde_t *pde = _kvm_pmap_get(kd, pdeindex, sizeof(*pde));

	return le64toh(*pde);
}

static amd64_pte_t
_amd64_pte_get(kvm_t *kd, u_long pteindex)
{
	amd64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return le64toh(*pte);
}

/* Get the first page table entry for a given page directory index. */
static amd64_pte_t *
_amd64_pde_first_pte(kvm_t *kd, u_long pdeindex)
{
	u_long *pa;

	pa = _kvm_pmap_get(kd, pdeindex, sizeof(amd64_pde_t));
	if (pa == NULL)
		return NULL;
	return _kvm_map_get(kd, *pa & AMD64_PG_FRAME, AMD64_PAGE_SIZE);
}

static int
_amd64_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) &&
	    _kvm_is_minidump(kd));
}

static void
_amd64_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_amd64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	/*
	 * NB: amd64 minidump header is binary compatible between version 1
	 * and version 2; this may not be the case for the future versions.
	 */
	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);

	sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
	    amd64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    AMD64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.pmapsize);

	return (0);
}

static int
_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
		if (pteindex >= vm->hdr.pmapsize / sizeof(pte))
			goto invalid;
		pte = _amd64_pte_get(kd, pteindex);
		if ((pte & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop_v1: pte not valid");
			goto invalid;
		}
		a = pte & AMD64_PG_FRAME;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	amd64_pte_t pt[AMD64_NPTEPG];
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pde_t pde;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	kvaddr_t pdeindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
		if (pdeindex >= vm->hdr.pmapsize / sizeof(pde))
			goto invalid;
		pde = _amd64_pde_get(kd, pdeindex);
		if ((pde & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop: pde not valid");
			goto invalid;
		}
		if ((pde & AMD64_PG_PS) == 0) {
			a = pde & AMD64_PG_FRAME;
			/* TODO: Just read the single PTE */
			ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
			if (ofs == -1) {
				_kvm_err(kd, kd->program,
				    "cannot find page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
			    AMD64_PAGE_SIZE) {
				_kvm_err(kd, kd->program,
				    "cannot read page table entry for %ju",
				    (uintmax_t)a);
				goto invalid;
			}
			pteindex = (va >> AMD64_PAGE_SHIFT) &
			    (AMD64_NPTEPG - 1);
			pte = le64toh(pt[pteindex]);
			if ((pte & AMD64_PG_V) == 0) {
				_kvm_err(kd, kd->program,
				    "_amd64_minidump_vatop: pte not valid");
				goto invalid;
			}
			a = pte & AMD64_PG_FRAME;
		} else {
			a = pde & AMD64_PG_PS_FRAME;
			a += (va & AMD64_PDRMASK) ^ offset;
		}
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else {
		_kvm_err(kd, kd->program,
	    "_amd64_minidump_vatop: virtual address 0x%jx not minidumped",
		    (uintmax_t)va);
		goto invalid;
	}

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static int
_amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0,
		    "_amd64_minidump_kvatop called in live kernel!");
		return (0);
	}
	if (((struct vmstate *)kd->vmst)->hdr.version == 1)
		return (_amd64_minidump_vatop_v1(kd, va, pa));
	else
		return (_amd64_minidump_vatop(kd, va, pa));
}

static int
_amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long npdes = vm->hdr.pmapsize / sizeof(amd64_pde_t);
	u_long bmindex, dva, pa, pdeindex, va;
	struct kvm_bitmap bm;
	int ret = 0;
	vm_prot_t prot;
	unsigned int pgsz = AMD64_PAGE_SIZE;

	if (vm->hdr.version < 2)
		return (0);

	if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
		return (0);

	for (pdeindex = 0; pdeindex < npdes; pdeindex++) {
-		pd_entry_t pde = _amd64_pde_get(kd, pdeindex);
-		pt_entry_t *ptes;
+		amd64_pde_t pde = _amd64_pde_get(kd, pdeindex);
+		amd64_pte_t *ptes;
		u_long i;

		va = vm->hdr.kernbase + (pdeindex << AMD64_PDRSHIFT);
-		if ((pde & PG_V) == 0)
+		if ((pde & AMD64_PG_V) == 0)
			continue;

		if ((pde & AMD64_PG_PS) != 0) {
			/*
			 * Large page.  Iterate on each 4K page section
			 * within this page.  This differs from 4K pages in
			 * that every page here uses the same PDE to
			 * generate permissions.
			 */
-			pa = pde & AMD64_PG_PS_FRAME +
+			pa = (pde & AMD64_PG_PS_FRAME) +
			    ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va));
			dva = vm->hdr.dmapbase + pa;
			_kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
			if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
			    _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) {
				goto out;
			}
			continue;
		}

		/* 4K pages: pde references another page of entries. */
		ptes = _amd64_pde_first_pte(kd, pdeindex);
		/* Ignore page directory pages that were not dumped. */
		if (ptes == NULL)
			continue;

-		for (i = 0; i < NPTEPG; i++) {
-			pt_entry_t pte = (u_long)ptes[i];
+		for (i = 0; i < AMD64_NPTEPG; i++) {
+			amd64_pte_t pte = (u_long)ptes[i];

			pa = pte & AMD64_PG_FRAME;
			dva = vm->hdr.dmapbase + pa;
-			if ((pte & PG_V) != 0) {
+			if ((pte & AMD64_PG_V) != 0) {
				_kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
				if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
				    _amd64_entry_to_prot(pte), pgsz, 0)) {
					goto out;
				}
			}
			va += AMD64_PAGE_SIZE;
		}
	}

	while (_kvm_bitmap_next(&bm, &bmindex)) {
		pa = bmindex * AMD64_PAGE_SIZE;
		dva = vm->hdr.dmapbase + pa;
		if (vm->hdr.dmapend < (dva + pgsz))
			break;
		va = 0;
		/* amd64/pmap.c: create_pagetables(): dmap always R|W. */
		prot = VM_PROT_READ | VM_PROT_WRITE;
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, pgsz, 0)) {
			goto out;
		}
	}
	ret = 1;

out:
	_kvm_bitmap_deinit(&bm);
	return (ret);
}

static struct kvm_arch kvm_amd64_minidump = {
	.ka_probe = _amd64_minidump_probe,
	.ka_initvtop = _amd64_minidump_initvtop,
	.ka_freevtop = _amd64_minidump_freevtop,
	.ka_kvatop = _amd64_minidump_kvatop,
	.ka_native = _amd64_native,
	.ka_walk_pages = _amd64_minidump_walk_pages,
};

KVM_ARCH(kvm_amd64_minidump);
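[Editor's note: the precedence fix above ("pa = (pde & AMD64_PG_PS_FRAME) + ...") matters because `&` binds looser than `+` in C. For a PG_PS mapping there is no PTE level: the low 21 bits of the VA carry straight through onto the 2MB frame, as in _amd64_minidump_vatop()'s `a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK)`. A sketch with made-up numbers; not part of the diff:]

#include <stdint.h>
#include <stdio.h>

#define	PG_PS_FRAME	0x000fffffffe00000ULL	/* 2MB-aligned frame */
#define	PDRMASK		((1 << 21) - 1)		/* low 21 bits of the VA */

int
main(void)
{
	uint64_t pde = 0x40200083ULL;		/* hypothetical PG_V|PG_PS pde */
	uint64_t va = 0xffffffff80334567ULL;	/* hypothetical kernel VA */
	uint64_t pa = (pde & PG_PS_FRAME) + (va & PDRMASK);

	printf("2MB frame %#jx + in-page %#jx -> pa %#jx\n",
	    (uintmax_t)(pde & PG_PS_FRAME), (uintmax_t)(va & PDRMASK),
	    (uintmax_t)pa);
	return (0);
}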
Index: head/lib/libkvm/kvm_minidump_arm.c
===================================================================
--- head/lib/libkvm/kvm_minidump_arm.c	(revision 325731)
+++ head/lib/libkvm/kvm_minidump_arm.c	(revision 325732)
@@ -1,270 +1,271 @@
/*-
 * Copyright (c) 2008 Semihalf, Grzegorz Bernacki
 * Copyright (c) 2006 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/lib/libkvm/kvm_minidump_i386.c,v 1.2 2006/06/05 08:51:14
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM machine dependent routines for kvm and minidumps.
 */

#include <sys/param.h>
#include <sys/endian.h>
+#include <vm/vm.h>
#include <kvm.h>

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../sys/arm/include/minidump.h"

#include "kvm_private.h"
#include "kvm_arm.h"

#define	arm_round_page(x)	roundup2((kvaddr_t)(x), ARM_PAGE_SIZE)

struct vmstate {
	struct minidumphdr hdr;
	unsigned char ei_data;
};

static arm_pt_entry_t
_arm_pte_get(kvm_t *kd, u_long pteindex)
{
	arm_pt_entry_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));

	return _kvm32toh(kd, *pte);
}

static int
_arm_minidump_probe(kvm_t *kd)
{

	return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
	    _kvm_is_minidump(kd));
}

static void
_arm_minidump_freevtop(kvm_t *kd)
{
	struct vmstate *vm = kd->vmst;

	free(vm);
	kd->vmst = NULL;
}

static int
_arm_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
	vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
	vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase);
	vmst->hdr.arch = _kvm32toh(kd, vmst->hdr.arch);
	vmst->hdr.mmuformat = _kvm32toh(kd, vmst->hdr.mmuformat);
	if (vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_UNKNOWN) {
		/* This is a safe default as 1K pages are not used. */
		vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6;
	}

	/* Skip header and msgbuf */
	off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize);

	sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) +
	    arm_round_page(vmst->hdr.ptesize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    ARM_PAGE_SIZE, sizeof(uint32_t)) == -1) {
		return (-1);
	}
	off += arm_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
		return (-1);
	}
	off += arm_round_page(vmst->hdr.ptesize);

	return (0);
}

static int
_arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	arm_pt_entry_t pte;
	arm_physaddr_t offset, a;
	kvaddr_t pteindex;
	off_t ofs;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!");
		return (0);
	}

	vm = kd->vmst;

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT;
		if (pteindex >= vm->hdr.ptesize / sizeof(pte))
			goto invalid;
		pte = _arm_pte_get(kd, pteindex);
		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) {
			_kvm_err(kd, kd->program,
			    "_arm_minidump_kvatop: pte not valid");
			goto invalid;
		}
		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
			/* 64K page -> convert to be like 4K page */
			offset = va & ARM_L2_S_OFFSET;
			a = (pte & ARM_L2_L_FRAME) +
			    (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
		} else {
			if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
			    (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
				_kvm_err(kd, kd->program,
				    "_arm_minidump_kvatop: pte not supported");
				goto invalid;
			}
			/* 4K page */
			offset = va & ARM_L2_S_OFFSET;
			a = pte & ARM_L2_S_FRAME;
		}

		ofs = _kvm_pt_find(kd, a, ARM_PAGE_SIZE);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_arm_minidump_kvatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}

		*pa = ofs + offset;
		return (ARM_PAGE_SIZE - offset);
	} else
		_kvm_err(kd, kd->program, "_arm_minidump_kvatop: virtual "
		    "address 0x%jx not minidumped", (uintmax_t)va);

invalid:
	_kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va);
	return (0);
}

static vm_prot_t
_arm_entry_to_prot(kvm_t *kd, arm_pt_entry_t pte)
{
	struct vmstate *vm = kd->vmst;
	vm_prot_t prot = VM_PROT_READ;

	/* Source: arm/arm/pmap-v4.c:pmap_fault_fixup() */
	if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4) {
		if (pte & ARM_L2_S_PROT_W)
			prot |= VM_PROT_WRITE;
		return prot;
	}

	/* Source: arm/arm/pmap-v6.c:pmap_protect() */
	if ((pte & ARM_PTE2_RO) == 0)
		prot |= VM_PROT_WRITE;
	if ((pte & ARM_PTE2_NX) == 0)
		prot |= VM_PROT_EXECUTE;
	return prot;
}

static int
_arm_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
{
	struct vmstate *vm = kd->vmst;
	u_long nptes = vm->hdr.ptesize / sizeof(arm_pt_entry_t);
	u_long dva, pa, pteindex, va;

	for (pteindex = 0; pteindex < nptes; pteindex++) {
		arm_pt_entry_t pte = _arm_pte_get(kd, pteindex);

		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV)
			continue;

		va = vm->hdr.kernbase + (pteindex << ARM_PAGE_SHIFT);
		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
			/* 64K page */
			pa = (pte & ARM_L2_L_FRAME) +
			    (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
		} else {
			if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
			    (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
				continue;
			}
			/* 4K page */
			pa = pte & ARM_L2_S_FRAME;
		}

		dva = 0; /* no direct map on this platform */
		if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
		    _arm_entry_to_prot(kd, pte), ARM_PAGE_SIZE, 0))
			return (0);
	}
	return (1);
}

static struct kvm_arch kvm_arm_minidump = {
	.ka_probe = _arm_minidump_probe,
	.ka_initvtop = _arm_minidump_initvtop,
	.ka_freevtop = _arm_minidump_freevtop,
	.ka_kvatop = _arm_minidump_kvatop,
	.ka_native = _arm_native,
	.ka_walk_pages = _arm_minidump_walk_pages,
};

KVM_ARCH(kvm_arm_minidump);
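[Editor's note: the L2_TYPE_L case above folds a 64KB "large" page back into 4KB-sized results: the frame comes from the 64KB entry, and `va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME` selects which 4KB slice of the 64KB page the VA falls in. A standalone restatement; the mask values are restated from kvm_arm.h as I understand them, so treat them as assumptions to verify against the header:]

#include <stdint.h>
#include <stdio.h>

#define	L2_L_FRAME	0xffff0000U	/* 64KB frame */
#define	L2_L_OFFSET	0x0000ffffU	/* offset within a 64KB page */
#define	L2_S_FRAME	0xfffff000U	/* 4KB frame */
#define	L2_S_OFFSET	0x00000fffU	/* offset within a 4KB page */

int
main(void)
{
	uint32_t pte = 0x20030001U;	/* hypothetical type-L (64KB) entry */
	uint32_t va = 0xc0045678U;	/* hypothetical kernel VA */
	/* 4KB slice of the 64KB frame that contains va: */
	uint32_t a = (pte & L2_L_FRAME) + (va & L2_L_OFFSET & L2_S_FRAME);

	printf("pa page %#x, offset %#x\n", a, va & L2_S_OFFSET);
	return (0);
}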
expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize); vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase); vmst->hdr.paemode = le32toh(vmst->hdr.paemode); /* Skip header and msgbuf */ off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize); sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) + i386_round_page(vmst->hdr.ptesize); if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE, sizeof(uint32_t)) == -1) { return (-1); } off += i386_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) { return (-1); } off += i386_round_page(vmst->hdr.ptesize); return (0); } static int _i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_pae_t offset; i386_pte_pae_t pte; kvaddr_t pteindex; i386_physaddr_pae_t a; off_t ofs; vm = kd->vmst; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(pte)) goto invalid; pte = _i386_pte_pae_get(kd, pteindex); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME_PAE; ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_t offset; i386_pte_t pte; kvaddr_t pteindex; i386_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(pte)) goto invalid; pte = _i386_pte_get(kd, pteindex); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME; ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!"); return (0); } if (kd->vmst->hdr.paemode) return (_i386_minidump_vatop_pae(kd, va, pa)); else return (_i386_minidump_vatop(kd, va, pa)); } static vm_prot_t _i386_entry_to_prot(uint64_t pte) { vm_prot_t prot = VM_PROT_READ; /* Source: i386/pmap.c:pmap_protect() */ if (pte & I386_PG_RW) prot |= VM_PROT_WRITE; if ((pte & I386_PG_NX) == 0) prot |= VM_PROT_EXECUTE; return prot; } struct i386_iter { kvm_t *kd; u_long nptes; u_long pteindex; }; static void _i386_iterator_init(struct i386_iter *it, kvm_t *kd) { struct vmstate *vm = kd->vmst; it->kd = kd; it->pteindex = 0; if 
(vm->hdr.paemode) { it->nptes = vm->hdr.ptesize / sizeof(i386_pte_pae_t); } else { it->nptes = vm->hdr.ptesize / sizeof(i386_pte_t); } return; } static int _i386_iterator_next(struct i386_iter *it, u_long *pa, u_long *va, u_long *dva, vm_prot_t *prot) { struct vmstate *vm = it->kd->vmst; i386_pte_t pte32; i386_pte_pae_t pte64; int found = 0; *dva = 0; + *pa = 0; + *va = 0; + *dva = 0; + *prot = 0; for (; it->pteindex < it->nptes && found == 0; it->pteindex++) { if (vm->hdr.paemode) { pte64 = _i386_pte_pae_get(it->kd, it->pteindex); if ((pte64 & I386_PG_V) == 0) continue; *prot = _i386_entry_to_prot(pte64); *pa = pte64 & I386_PG_FRAME_PAE; } else { pte32 = _i386_pte_get(it->kd, it->pteindex); if ((pte32 & I386_PG_V) == 0) continue; *prot = _i386_entry_to_prot(pte32); *pa = pte32 & I386_PG_FRAME; } *va = vm->hdr.kernbase + (it->pteindex << I386_PAGE_SHIFT); found = 1; } return found; } static int _i386_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct i386_iter it; u_long dva, pa, va; vm_prot_t prot; _i386_iterator_init(&it, kd); while (_i386_iterator_next(&it, &pa, &va, &dva, &prot)) { if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, I386_PAGE_SIZE, 0)) { return (0); } } return (1); } static struct kvm_arch kvm_i386_minidump = { .ka_probe = _i386_minidump_probe, .ka_initvtop = _i386_minidump_initvtop, .ka_freevtop = _i386_minidump_freevtop, .ka_kvatop = _i386_minidump_kvatop, .ka_native = _i386_native, .ka_walk_pages = _i386_minidump_walk_pages, }; KVM_ARCH(kvm_i386_minidump); Index: head/lib/libkvm/kvm_minidump_mips.c =================================================================== --- head/lib/libkvm/kvm_minidump_mips.c (revision 325731) +++ head/lib/libkvm/kvm_minidump_mips.c (revision 325732) @@ -1,360 +1,363 @@ /*- * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_arm.c r214223 */ #include __FBSDID("$FreeBSD$"); /* * MIPS machine dependent routines for kvm and minidumps. 
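[Editor's note: paemode changes the width of every page-table entry, which is why the iterator recomputes nptes per format instead of assuming one entry size: 8-byte PAE entries halve the number of slots covered by hdr.ptesize. A trivial sketch of that size split, with a made-up ptesize; not part of the diff:]

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t ptesize = 0x400000;	/* hypothetical hdr.ptesize */
	int paemode = 1;
	/* PAE entries are 64-bit, classic i386 entries are 32-bit. */
	unsigned long nptes = ptesize /
	    (paemode ? sizeof(uint64_t) : sizeof(uint32_t));

	printf("%lu pte slots\n", nptes);
	return (0);
}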
*/ #include +#include #include #include #include #include #include #include #include "../../sys/mips/include/cpuregs.h" #include "../../sys/mips/include/minidump.h" #include "kvm_private.h" #include "kvm_mips.h" #define mips_round_page(x) roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; int pte_size; }; static int _mips_minidump_probe(kvm_t *kd) { if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 && kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); if (kd->nlehdr.e_machine != EM_MIPS) return (0); return (_kvm_is_minidump(kd)); } static void _mips_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm); kd->vmst = NULL; } static int _mips_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; off_t off, sparse_off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 || kd->nlehdr.e_flags & EF_MIPS_ABI2) vmst->pte_size = 64; else vmst->pte_size = 32; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase); vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase); vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend); /* Skip header and msgbuf */ off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize); sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) + mips_round_page(vmst->hdr.ptesize); if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE, sizeof(uint32_t)) == -1) { return (-1); } off += mips_round_page(vmst->hdr.bitmapsize); if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) { return (-1); } off += mips_round_page(vmst->hdr.ptesize); return (0); } static int _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; mips_physaddr_t offset, a; kvaddr_t pteindex; u_long valid; off_t ofs; mips32_pte_t pte32; mips64_pte_t pte64; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!"); return (0); } offset = va & MIPS_PAGE_MASK; /* Operate with page-aligned address */ va &= ~MIPS_PAGE_MASK; vm = kd->vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) { a = va & MIPS_XKPHYS_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } else { if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT; if (vm->pte_size == 64) { valid = pteindex < vm->hdr.ptesize / sizeof(pte64); if (pteindex >= 
vm->hdr.ptesize / sizeof(pte64)) goto invalid; pte64 = _mips64_pte_get(kd, pteindex); valid = pte64 & MIPS_PTE_V; if (valid) a = MIPS64_PTE_TO_PA(pte64); } else { if (pteindex >= vm->hdr.ptesize / sizeof(pte32)) goto invalid; pte32 = _mips32_pte_get(kd, pteindex); valid = pte32 & MIPS_PTE_V; if (valid) a = MIPS32_PTE_TO_PA(pte32); } if (!valid) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte " "not valid"); goto invalid; } } else { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual " "address 0x%jx not minidumped", (uintmax_t)va); return (0); } found: ofs = _kvm_pt_find(kd, a, MIPS_PAGE_SIZE); if (ofs == -1) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical " "address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (MIPS_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int #ifdef __mips__ _mips_native(kvm_t *kd) #else _mips_native(kvm_t *kd __unused) #endif { #ifdef __mips__ #ifdef __mips_n64 if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); #else if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32) return (0); #ifdef __mips_n32 if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2)) return (0); #else if (kd->nlehdr.e_flags & EF_MIPS_ABI2) return (0); #endif #endif #if _BYTE_ORDER == _LITTLE_ENDIAN return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); #else return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); #endif #else return (0); #endif } struct mips_iter { kvm_t *kd; u_long nptes; u_long pteindex; }; static void _mips_iterator_init(struct mips_iter *it, kvm_t *kd) { struct vmstate *vm = kd->vmst; it->kd = kd; it->pteindex = 0; if (vm->pte_size == 64) it->nptes = vm->hdr.ptesize / sizeof(mips64_pte_t); else it->nptes = vm->hdr.ptesize / sizeof(mips32_pte_t); return; } static int _mips_iterator_next(struct mips_iter *it, u_long *pa, u_long *va, u_long *dva, vm_prot_t *prot) { struct vmstate *vm = it->kd->vmst; int found = 0; mips64_pte_t pte64; mips32_pte_t pte32; /* * mips/mips/pmap.c: init_pte_prot / pmap_protect indicate that all * pages are R|X at least. */ *prot = VM_PROT_READ | VM_PROT_EXECUTE; + *pa = 0; + *va = 0; *dva = 0; for (;it->pteindex < it->nptes && found == 0; it->pteindex++) { if (vm->pte_size == 64) { pte64 = _mips64_pte_get(it->kd, it->pteindex); if ((pte64 & MIPS_PTE_V) == 0) continue; if ((pte64 & MIPS64_PTE_RO) == 0) *prot |= VM_PROT_WRITE; *pa = MIPS64_PTE_TO_PA(pte64); } else { pte32 = _mips32_pte_get(it->kd, it->pteindex); if ((pte32 & MIPS_PTE_V) == 0) continue; if ((pte32 & MIPS32_PTE_RO) == 0) *prot |= VM_PROT_WRITE; *pa = MIPS32_PTE_TO_PA(pte32); } *va = vm->hdr.kernbase + (it->pteindex << MIPS_PAGE_SHIFT); found = 1; /* advance pteindex regardless */ } return found; } static int _mips_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg) { struct mips_iter it; u_long dva, pa, va; vm_prot_t prot; /* Generate direct mapped entries; need page entries for prot etc? 
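 * (Illustrative note, not in the original: in those segments the physical
 * address is simply the virtual address with the segment bits masked off,
 * exactly as _mips_minidump_kvatop() resolves them above, so entries could
 * be synthesized here without consulting a PTE; only the protection bits
 * would have to be assumed.)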
*/ if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { /* MIPS_XKPHYS_START..MIPS_XKPHYS_END */ /* MIPS64_KSEG0_START..MIPS64_KSEG0_END */ /* MIPS64_KSEG1_START..MIPS64_KSEG1_END */ } else { /* MIPS32_KSEG0_START..MIPS32_KSEG0_END */ /* MIPS32_KSEG1_START..MIPS32_KSEG1_END */ } _mips_iterator_init(&it, kd); while (_mips_iterator_next(&it, &pa, &va, &dva, &prot)) { if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, MIPS_PAGE_SIZE, 0)) { return (0); } } return (1); } static struct kvm_arch kvm_mips_minidump = { .ka_probe = _mips_minidump_probe, .ka_initvtop = _mips_minidump_initvtop, .ka_freevtop = _mips_minidump_freevtop, .ka_kvatop = _mips_minidump_kvatop, .ka_native = _mips_native, .ka_walk_pages = _mips_minidump_walk_pages, }; KVM_ARCH(kvm_mips_minidump); Index: head/lib/libkvm/kvm_private.c =================================================================== --- head/lib/libkvm/kvm_private.c (revision 325731) +++ head/lib/libkvm/kvm_private.c (revision 325732) @@ -1,766 +1,767 @@ /*- * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #define _WANT_VNET #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include "kvm_private.h" /* * Routines private to libkvm. */ /* from src/lib/libc/gen/nlist.c */ int __fdnlist(int, struct nlist *); /* * Report an error using printf style arguments. "program" is kd->program * on hard errors, and 0 on soft errors, so that under sun error emulation, * only hard errors are printed out (otherwise, programs like gdb will * generate tons of error messages when trying to access bogus pointers). */ void _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{ va_list ap; va_start(ap, fmt); if (program != NULL) { (void)fprintf(stderr, "%s: ", program); (void)vfprintf(stderr, fmt, ap); (void)fputc('\n', stderr); } else (void)vsnprintf(kd->errbuf, sizeof(kd->errbuf), fmt, ap); va_end(ap); } void _kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...) { va_list ap; int n; va_start(ap, fmt); if (program != NULL) { (void)fprintf(stderr, "%s: ", program); (void)vfprintf(stderr, fmt, ap); (void)fprintf(stderr, ": %s\n", strerror(errno)); } else { char *cp = kd->errbuf; (void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap); n = strlen(cp); (void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s", strerror(errno)); } va_end(ap); } void * _kvm_malloc(kvm_t *kd, size_t n) { void *p; if ((p = calloc(n, sizeof(char))) == NULL) _kvm_err(kd, kd->program, "can't allocate %zu bytes: %s", n, strerror(errno)); return (p); } int _kvm_probe_elf_kernel(kvm_t *kd, int class, int machine) { return (kd->nlehdr.e_ident[EI_CLASS] == class && kd->nlehdr.e_type == ET_EXEC && kd->nlehdr.e_machine == machine); } int _kvm_is_minidump(kvm_t *kd) { char minihdr[8]; if (kd->rawdump) return (0); if (pread(kd->pmfd, &minihdr, 8, 0) == 8 && memcmp(&minihdr, "minidump", 8) == 0) return (1); return (0); } /* * The powerpc backend has a hack to strip a leading kerneldump * header from the core before treating it as an ELF header. * * We can add that here if we can get a change to libelf to support * an initial offset into the file. Alternatively we could patch * savecore to extract cores from a regular file instead. */ int _kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp) { GElf_Ehdr ehdr; GElf_Phdr *phdr; Elf *elf; size_t i, phnum; elf = elf_begin(kd->pmfd, ELF_C_READ, NULL); if (elf == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); return (-1); } if (elf_kind(elf) != ELF_K_ELF) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (gelf_getehdr(elf, &ehdr) == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } if (ehdr.e_type != ET_CORE) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (ehdr.e_machine != kd->nlehdr.e_machine) { _kvm_err(kd, kd->program, "invalid core"); goto bad; } if (elf_getphdrnum(elf, &phnum) == -1) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } phdr = calloc(phnum, sizeof(*phdr)); if (phdr == NULL) { _kvm_err(kd, kd->program, "failed to allocate phdrs"); goto bad; } for (i = 0; i < phnum; i++) { if (gelf_getphdr(elf, i, &phdr[i]) == NULL) { free(phdr); _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); goto bad; } } elf_end(elf); *phnump = phnum; *phdrp = phdr; return (0); bad: elf_end(elf); return (-1); } /* * Transform v such that only bits [bit0, bitN) may be set. Generates a * bitmask covering the number of bits, then shifts so +bit0+ is the first. */ static uint64_t bitmask_range(uint64_t v, uint64_t bit0, uint64_t bitN) { if (bit0 == 0 && bitN == BITS_IN(v)) return (v); return (v & (((1ULL << (bitN - bit0)) - 1ULL) << bit0)); } /* * Returns the number of bits in a given byte array range starting at a * given base, from bit0 to bitN. bit0 may be non-zero in the case of * counting backwards from bitN. */ static uint64_t popcount_bytes(uint64_t *addr, uint32_t bit0, uint32_t bitN) { uint32_t res = bitN - bit0; uint64_t count = 0; uint32_t bound; /* Align to 64-bit boundary on the left side if needed. 
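 * For example (illustrative): bit0 = 6 with 64-bit words gives
 * bound = MIN(bitN, 64); bits 6..bound of the first word are counted,
 * res shrinks by (bound - bit0), and the loop below carries on from
 * the following word.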
*/ if ((bit0 % BITS_IN(*addr)) != 0) { bound = MIN(bitN, roundup2(bit0, BITS_IN(*addr))); count += __bitcount64(bitmask_range(*addr, bit0, bound)); res -= (bound - bit0); addr++; } while (res > 0) { bound = MIN(res, BITS_IN(*addr)); count += __bitcount64(bitmask_range(*addr, 0, bound)); res -= bound; addr++; } return (count); } void * _kvm_pmap_get(kvm_t *kd, u_long idx, size_t len) { - off_t off = idx * len; + uintptr_t off = idx * len; - if (off >= kd->pt_sparse_off) + if ((off_t)off >= kd->pt_sparse_off) return (NULL); return (void *)((uintptr_t)kd->page_map + off); } void * _kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size) { off_t off; uintptr_t addr; off = _kvm_pt_find(kd, pa, page_size); if (off == -1) return NULL; addr = (uintptr_t)kd->page_map + off; if (off >= kd->pt_sparse_off) addr = (uintptr_t)kd->sparse_map + (off - kd->pt_sparse_off); return (void *)addr; } int _kvm_pt_init(kvm_t *kd, size_t map_len, off_t map_off, off_t sparse_off, int page_size, int word_size) { uint64_t *addr; uint32_t *popcount_bin; int bin_popcounts = 0; uint64_t pc_bins, res; ssize_t rd; /* * Map the bitmap specified by the arguments. */ kd->pt_map = _kvm_malloc(kd, map_len); if (kd->pt_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %zu bytes for bitmap", map_len); return (-1); } rd = pread(kd->pmfd, kd->pt_map, map_len, map_off); if (rd < 0 || rd != (ssize_t)map_len) { _kvm_err(kd, kd->program, "cannot read %zu bytes for bitmap", map_len); return (-1); } kd->pt_map_size = map_len; /* * Generate a popcount cache for every POPCOUNT_BITS in the bitmap, * so lookups only have to calculate the number of bits set between * a cache point and their bit. This reduces lookups to O(1), * without significantly increasing memory requirements. * * Round up the number of bins so that 'upper half' lookups work for * the final bin, if needed. The first popcount is 0, since no bits * precede bit 0, so add 1 for that also. Without this, extra work * would be needed to handle the first PTEs in _kvm_pt_find(). */ addr = kd->pt_map; res = map_len; pc_bins = 1 + (res * NBBY + POPCOUNT_BITS / 2) / POPCOUNT_BITS; kd->pt_popcounts = calloc(pc_bins, sizeof(uint32_t)); if (kd->pt_popcounts == NULL) { _kvm_err(kd, kd->program, "cannot allocate popcount bins"); return (-1); } for (popcount_bin = &kd->pt_popcounts[1]; res > 0; addr++, res -= sizeof(*addr)) { *popcount_bin += popcount_bytes(addr, 0, MIN(res * NBBY, BITS_IN(*addr))); if (++bin_popcounts == POPCOUNTS_IN(*addr)) { popcount_bin++; *popcount_bin = *(popcount_bin - 1); bin_popcounts = 0; } } assert(pc_bins * sizeof(*popcount_bin) == ((uintptr_t)popcount_bin - (uintptr_t)kd->pt_popcounts)); kd->pt_sparse_off = sparse_off; kd->pt_sparse_size = (uint64_t)*popcount_bin * page_size; kd->pt_page_size = page_size; kd->pt_word_size = word_size; /* * Map the sparse page array. This is useful for performing point * lookups of specific pages, e.g. for kvm_walk_pages. Generally, * this is much larger than is reasonable to read in up front, so * mmap it in instead. 
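 * (Sizing note, for illustration: pt_sparse_size is the popcount of the
 * whole bitmap times the page size, i.e. one page of file data per set
 * bit, so a dump holding a million resident 4 KiB pages would need
 * roughly 4 GB mapped here.)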
*/ kd->sparse_map = mmap(NULL, kd->pt_sparse_size, PROT_READ, MAP_PRIVATE, kd->pmfd, kd->pt_sparse_off); if (kd->sparse_map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map %" PRIu64 - " bytes from fd %d offset %ld for sparse map: %s", + " bytes from fd %d offset %jd for sparse map: %s", kd->pt_sparse_size, kd->pmfd, - kd->pt_sparse_off, strerror(errno)); + (intmax_t)kd->pt_sparse_off, strerror(errno)); return (-1); } return (0); } int _kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off) { ssize_t exp_len = pmap_size; kd->page_map_size = pmap_size; kd->page_map_off = pmap_off; kd->page_map = _kvm_malloc(kd, pmap_size); if (kd->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %u bytes " "for page map", pmap_size); return (-1); } if (pread(kd->pmfd, kd->page_map, pmap_size, pmap_off) != exp_len) { _kvm_err(kd, kd->program, "cannot read %d bytes from " - "offset %ld for page map", pmap_size, pmap_off); + "offset %jd for page map", pmap_size, (intmax_t)pmap_off); return (-1); } return (0); } /* * Find the offset for the given physical page address; returns -1 otherwise. * * A page's offset is represented by the sparse page base offset plus the * number of bits set before its bit multiplied by page size. This means * that if a page exists in the dump, it's necessary to know how many pages * in the dump precede it. Reduce this O(n) counting to O(1) by caching the * number of bits set at POPCOUNT_BITS intervals. * * Then to find the number of pages before the requested address, simply * index into the cache and count the number of bits set between that cache * bin and the page's bit. Halve the number of bytes that have to be * checked by also counting down from the next higher bin if it's closer. */ off_t _kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size) { uint64_t *bitmap = kd->pt_map; uint64_t pte_bit_id = pa / page_size; uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap); uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS; uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap)); uint64_t bitN; uint32_t count; /* Check whether the page address requested is in the dump. */ if (pte_bit_id >= (kd->pt_map_size * NBBY) || (bitmap[pte_u64] & pte_mask) == 0) return (-1); /* * Add/sub popcounts from the bitmap until the PTE's bit is reached. * For bits that are in the upper half between the calculated * popcount id and the next one, use the next one and subtract to * minimize the number of popcounts required. */ if ((pte_bit_id % POPCOUNT_BITS) < (POPCOUNT_BITS / 2)) { count = kd->pt_popcounts[popcount_id] + popcount_bytes( bitmap + popcount_id * POPCOUNTS_IN(*bitmap), 0, pte_bit_id - popcount_id * POPCOUNT_BITS); } else { /* * Counting in reverse is trickier, since we must avoid * reading from bytes that are not in range, and invert. */ uint64_t pte_u64_bit_off = pte_u64 * BITS_IN(*bitmap); popcount_id++; bitN = MIN(popcount_id * POPCOUNT_BITS, kd->pt_map_size * BITS_IN(uint8_t)); count = kd->pt_popcounts[popcount_id] - popcount_bytes( bitmap + pte_u64, pte_bit_id - pte_u64_bit_off, bitN - pte_u64_bit_off); } /* * This can only happen if the core is truncated. Treat these * entries as if they don't exist, since their backing doesn't. 
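 * (Worked example, for illustration: with 4 KiB pages, pa = 0x5000 maps
 * to bit 5; if three of bits 0..4 are set, count = 3 and the page's data
 * starts at pt_sparse_off + 3 * 4096, unless that count reaches past the
 * truncated end, in which case -1 is returned below.)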
*/ if (count >= (kd->pt_sparse_size / page_size)) return (-1); return (kd->pt_sparse_off + (uint64_t)count * page_size); } static int kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list) { kvaddr_t addr; int error, nfail; if (kd->resolve_symbol == NULL) { struct nlist *nl; int count, i; for (count = 0; list[count].n_name != NULL && list[count].n_name[0] != '\0'; count++) ; nl = calloc(count + 1, sizeof(*nl)); for (i = 0; i < count; i++) nl[i].n_name = list[i].n_name; nfail = __fdnlist(kd->nlfd, nl); for (i = 0; i < count; i++) { list[i].n_type = nl[i].n_type; list[i].n_value = nl[i].n_value; } free(nl); return (nfail); } nfail = 0; while (list->n_name != NULL && list->n_name[0] != '\0') { error = kd->resolve_symbol(list->n_name, &addr); if (error != 0) { nfail++; list->n_value = 0; list->n_type = 0; } else { list->n_value = addr; list->n_type = N_DATA | N_EXT; } list++; } return (nfail); } /* * Walk the list of unresolved symbols, generate a new list and prefix the * symbol names, try again, and merge back what we could resolve. */ static int kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing, const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t)) { struct kvm_nlist *n, *np, *p; char *cp, *ce; const char *ccp; size_t len; int slen, unresolved; /* * Calculate the space we need to malloc for nlist and names. * We are going to store the name twice for later lookups: once * with the prefix and once the unmodified name delimited by \0. */ len = 0; unresolved = 0; for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; len += sizeof(struct kvm_nlist) + strlen(prefix) + 2 * (strlen(p->n_name) + 1); unresolved++; } if (unresolved == 0) return (unresolved); /* Add space for the terminating nlist entry. */ len += sizeof(struct kvm_nlist); unresolved++; /* Alloc one chunk for (nlist, [names]) and setup pointers. */ n = np = malloc(len); if (n == NULL) return (missing); bzero(n, len); cp = ce = (char *)np; cp += unresolved * sizeof(struct kvm_nlist); ce += len; /* Generate shortened nlist with special prefix. */ unresolved = 0; for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; *np = *p; /* Save the new\0orig. name so we can later match it again. */ slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix, (prefix[0] != '\0' && p->n_name[0] == '_') ? (p->n_name + 1) : p->n_name, '\0', p->n_name); if (slen < 0 || slen >= ce - cp) continue; np->n_name = cp; cp += slen + 1; np++; unresolved++; } /* Do lookup on the reduced list. */ np = n; unresolved = kvm_fdnlist(kd, np); /* Check if we could resolve further symbols and update the list. */ if (unresolved >= 0 && unresolved < missing) { /* Find the first freshly resolved entry. */ for (; np->n_name && np->n_name[0]; np++) if (np->n_type != N_UNDF) break; /* * The lists are both in the same order, * so we can walk them in parallel. */ for (p = nl; np->n_name && np->n_name[0] && p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; /* Skip expanded name and compare to orig. one. */ ccp = np->n_name + strlen(np->n_name) + 1; if (strcmp(ccp, p->n_name) != 0) continue; /* Update nlist with new, translated results. */ p->n_type = np->n_type; if (validate_fn) p->n_value = (*validate_fn)(kd, np->n_value); else p->n_value = np->n_value; missing--; /* Find next freshly resolved entry. */ for (np++; np->n_name && np->n_name[0]; np++) if (np->n_type != N_UNDF) break; } } /* We could assert missing == unresolved here.
*/ free(n); return (unresolved); } int _kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize) { struct kvm_nlist *p; int nvalid; struct kld_sym_lookup lookup; int error; const char *prefix = ""; char symname[1024]; /* XXX-BZ symbol name length limit? */ int tried_vnet, tried_dpcpu; /* * If we can't use the kld symbol lookup, revert to the * slow library call. */ if (!ISALIVE(kd)) { error = kvm_fdnlist(kd, nl); if (error <= 0) /* Hard error or success. */ return (error); if (_kvm_vnet_initialized(kd, initialize)) error = kvm_fdnlist_prefix(kd, nl, error, VNET_SYMPREFIX, _kvm_vnet_validaddr); if (error > 0 && _kvm_dpcpu_initialized(kd, initialize)) error = kvm_fdnlist_prefix(kd, nl, error, DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr); return (error); } /* * We can use the kld lookup syscall. Go through each nlist entry * and look it up with a kldsym(2) syscall. */ nvalid = 0; tried_vnet = 0; tried_dpcpu = 0; again: for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; lookup.version = sizeof(lookup); lookup.symvalue = 0; lookup.symsize = 0; error = snprintf(symname, sizeof(symname), "%s%s", prefix, (prefix[0] != '\0' && p->n_name[0] == '_') ? (p->n_name + 1) : p->n_name); if (error < 0 || error >= (int)sizeof(symname)) continue; lookup.symname = symname; if (lookup.symname[0] == '_') lookup.symname++; if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) { p->n_type = N_TEXT; if (_kvm_vnet_initialized(kd, initialize) && strcmp(prefix, VNET_SYMPREFIX) == 0) p->n_value = _kvm_vnet_validaddr(kd, lookup.symvalue); else if (_kvm_dpcpu_initialized(kd, initialize) && strcmp(prefix, DPCPU_SYMPREFIX) == 0) p->n_value = _kvm_dpcpu_validaddr(kd, lookup.symvalue); else p->n_value = lookup.symvalue; ++nvalid; /* lookup.symsize */ } } /* * Check the number of entries that weren't found. If they exist, * try again with a prefix for virtualized or DPCPU symbol names. */ error = ((p - nl) - nvalid); if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) { tried_vnet = 1; prefix = VNET_SYMPREFIX; goto again; } if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) { tried_dpcpu = 1; prefix = DPCPU_SYMPREFIX; goto again; } /* * Return the number of entries that weren't found. If they exist, * also fill internal error buffer. */ error = ((p - nl) - nvalid); if (error) _kvm_syserr(kd, kd->program, "kvm_nlist"); return (error); } int _kvm_bitmap_init(struct kvm_bitmap *bm, u_long bitmapsize, u_long *idx) { *idx = ULONG_MAX; bm->map = calloc(bitmapsize, sizeof *bm->map); if (bm->map == NULL) return (0); bm->size = bitmapsize; return (1); } void _kvm_bitmap_set(struct kvm_bitmap *bm, u_long pa, unsigned int page_size) { u_long bm_index = pa / page_size; uint8_t *byte = &bm->map[bm_index / 8]; *byte |= (1UL << (bm_index % 8)); } int _kvm_bitmap_next(struct kvm_bitmap *bm, u_long *idx) { u_long first_invalid = bm->size * CHAR_BIT; if (*idx == ULONG_MAX) *idx = 0; else (*idx)++; /* Find the next valid idx. */ for (; *idx < first_invalid; (*idx)++) { unsigned int mask = 1U << (*idx % CHAR_BIT); if ((bm->map[*idx / CHAR_BIT] & mask) != 0) break; } return (*idx < first_invalid); } void _kvm_bitmap_deinit(struct kvm_bitmap *bm) { free(bm->map); } int _kvm_visit_cb(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg, u_long pa, u_long kmap_vaddr, u_long dmap_vaddr, vm_prot_t prot, size_t len, unsigned int page_size) { unsigned int pgsz = page_size ?
page_size : len; struct kvm_page p = { .version = LIBKVM_WALK_PAGES_VERSION, .paddr = pa, .kmap_vaddr = kmap_vaddr, .dmap_vaddr = dmap_vaddr, .prot = prot, .offset = _kvm_pt_find(kd, pa, pgsz), .len = len, }; return cb(&p, arg); }
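For context, a minimal sketch of a consumer of the kvm_walk_pages() interface that the backends above implement through _kvm_visit_cb(). This is illustrative only and not part of the change: the program and the dump path are hypothetical, and the callback merely prints the fields that struct kvm_page carries. Returning nonzero from the callback continues the walk; the backends translate a zero return from _kvm_visit_cb() into a zero return from kvm_walk_pages().

#include <sys/types.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

/* Called once per page in the dump; return nonzero to keep walking. */
static int
print_page(struct kvm_page *p, void *arg)
{
	u_long *count = arg;

	printf("pa %#lx kva %#lx dva %#lx prot %#x len %zu\n",
	    p->paddr, p->kmap_vaddr, p->dmap_vaddr, p->prot, p->len);
	(*count)++;
	return (1);
}

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;
	u_long count = 0;

	if (argc != 2) {
		fprintf(stderr, "usage: %s vmcore\n", argv[0]);
		return (1);
	}
	/* NULL execfile means the running kernel's symbols; argv[1] is an
	   illustrative crash dump path, e.g. /var/crash/vmcore.0. */
	kd = kvm_openfiles(NULL, argv[1], NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	/* Zero means the callback stopped the walk or an error occurred. */
	if (kvm_walk_pages(kd, print_page, &count) == 0)
		fprintf(stderr, "kvm_walk_pages: %s\n", kvm_geterr(kd));
	printf("%lu pages visited\n", count);
	kvm_close(kd);
	return (0);
}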