Index: stable/11/lib/libkvm/kvm_amd64.c =================================================================== --- stable/11/lib/libkvm/kvm_amd64.c (revision 316125) +++ stable/11/lib/libkvm/kvm_amd64.c (revision 316126) @@ -1,337 +1,337 @@ /*- * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #if defined(LIBC_SCCS) && !defined(lint) #if 0 static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; #endif #endif /* LIBC_SCCS and not lint */ /* * AMD64 machine dependent routines for kvm. Hopefully, the forthcoming * vm code will one day obsolete this module. */ #include #include #include #include #include #include #include #include #include "kvm_private.h" #include "kvm_amd64.h" struct vmstate { size_t phnum; GElf_Phdr *phdr; amd64_pml4e_t *PML4; }; /* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { struct vmstate *vm = kd->vmst; GElf_Phdr *p; size_t n; if (kd->rawdump) { *ofs = pa; return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } p = vm->phdr; n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } static void _amd64_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->PML4) free(vm->PML4); free(vm->phdr); free(vm); kd->vmst = NULL; } static int _amd64_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && !_kvm_is_minidump(kd)); } static int _amd64_initvtop(kvm_t *kd) { struct kvm_nlist nl[2]; amd64_physaddr_t pa; kvaddr_t kernbase; amd64_pml4e_t *PML4; kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); if (kd->vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst->PML4 = 0; if (kd->rawdump == 0) { if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no kernbase"); return (-1); } kernbase = nl[0].n_value; nl[0].n_name = "KPML4phys"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no KPML4phys"); return 
(-1); } if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); return (-1); } pa = le64toh(pa); PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE); if (PML4 == NULL) { _kvm_err(kd, kd->program, "cannot allocate PML4"); return (-1); } if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); free(PML4); return (-1); } kd->vmst->PML4 = PML4; return (0); } static int _amd64_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; amd64_physaddr_t offset; amd64_physaddr_t pdpe_pa; amd64_physaddr_t pde_pa; amd64_physaddr_t pte_pa; amd64_pml4e_t pml4e; amd64_pdpe_t pdpe; amd64_pde_t pde; amd64_pte_t pte; kvaddr_t pml4eindex; kvaddr_t pdpeindex; kvaddr_t pdeindex; kvaddr_t pteindex; amd64_physaddr_t a; off_t ofs; size_t s; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. 
*/ if (vm->PML4 == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: bootstrap data not in dump"); goto invalid; } else return (AMD64_PAGE_SIZE - offset); } pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1); pml4e = le64toh(vm->PML4[pml4eindex]); if ((pml4e & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid"); goto invalid; } pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1); pdpe_pa = (pml4e & AMD64_PG_FRAME) + (pdpeindex * sizeof(amd64_pdpe_t)); s = _kvm_pa2off(kd, pdpe_pa, &ofs); if (s < sizeof(pdpe)) { _kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found"); goto invalid; } if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe"); goto invalid; } pdpe = le64toh(pdpe); if ((pdpe & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid"); goto invalid; } if (pdpe & AMD64_PG_PS) { /* * No next-level page table; pdpe describes one 1GB page. */ a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK); s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: 1GB page address not in dump"); goto invalid; } else return (AMD64_NBPDP - (va & AMD64_PDPMASK)); } pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1); pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t)); s = _kvm_pa2off(kd, pde_pa, &ofs); if (s < sizeof(pde)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found"); goto invalid; } if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read pde"); goto invalid; } pde = le64toh(pde); if ((pde & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pde not valid"); goto invalid; } if (pde & AMD64_PG_PS) { /* * No final-level page table; pde describes one 2MB page. 
*/ a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK); s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: 2MB page address not in dump"); goto invalid; } else return (AMD64_NBPDR - (va & AMD64_PDRMASK)); } pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1); pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found"); goto invalid; } if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read"); goto invalid; } if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pte not valid"); goto invalid; } a = (pte & AMD64_PG_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: address not in dump"); goto invalid; } else return (AMD64_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); return (0); } return (_amd64_vatop(kd, va, pa)); } int -_amd64_native(kvm_t *kd) +_amd64_native(kvm_t *kd __unused) { #ifdef __amd64__ return (1); #else return (0); #endif } -struct kvm_arch kvm_amd64 = { +static struct kvm_arch kvm_amd64 = { .ka_probe = _amd64_probe, .ka_initvtop = _amd64_initvtop, .ka_freevtop = _amd64_freevtop, .ka_kvatop = _amd64_kvatop, .ka_native = _amd64_native, }; KVM_ARCH(kvm_amd64); Index: stable/11/lib/libkvm/kvm_arm.c =================================================================== --- stable/11/lib/libkvm/kvm_arm.c (revision 316125) +++ stable/11/lib/libkvm/kvm_arm.c (revision 316126) @@ -1,274 +1,278 @@ /*- * Copyright (c) 2005 Olivier Houchard * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. 
* * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * ARM machine dependent routines for kvm. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #ifdef __arm__ #include #endif #include "kvm_private.h" #include "kvm_arm.h" struct vmstate { arm_pd_entry_t *l1pt; size_t phnum; GElf_Phdr *phdr; }; /* * Translate a physical memory address to a file-offset in the crash-dump. 
*/ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz) { struct vmstate *vm = kd->vmst; GElf_Phdr *p; size_t n; p = vm->phdr; n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; if (pgsz == 0) return (p->p_memsz - (pa - p->p_paddr)); return (pgsz - ((size_t)pa & (pgsz - 1))); } static void _arm_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; free(vm->phdr); free(vm); kd->vmst = NULL; } static int _arm_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) && !_kvm_is_minidump(kd)); } static int _arm_initvtop(kvm_t *kd) { struct vmstate *vm; struct kvm_nlist nl[2]; kvaddr_t kernbase; arm_physaddr_t physaddr, pa; arm_pd_entry_t *l1pt; size_t i; int found; if (kd->rawdump) { _kvm_err(kd, kd->program, "raw dumps not supported on arm"); return (-1); } vm = _kvm_malloc(kd, sizeof(*vm)); if (vm == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vm; vm->l1pt = NULL; if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1) return (-1); found = 0; for (i = 0; i < vm->phnum; i++) { if (vm->phdr[i].p_type == PT_DUMP_DELTA) { kernbase = vm->phdr[i].p_vaddr; physaddr = vm->phdr[i].p_paddr; found = 1; break; } } nl[1].n_name = NULL; if (!found) { nl[0].n_name = "kernbase"; if (kvm_nlist2(kd, nl) != 0) { #ifdef __arm__ kernbase = KERNBASE; #else _kvm_err(kd, kd->program, "cannot resolve kernbase"); return (-1); #endif } else kernbase = nl[0].n_value; nl[0].n_name = "physaddr"; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "couldn't get phys addr"); return (-1); } physaddr = nl[0].n_value; } nl[0].n_name = "kernel_l1pa"; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist"); return (-1); } if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read kernel_l1pa"); return (-1); } l1pt = _kvm_malloc(kd, 
ARM_L1_TABLE_SIZE); if (l1pt == NULL) { _kvm_err(kd, kd->program, "cannot allocate l1pt"); return (-1); } if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) { _kvm_err(kd, kd->program, "cannot read l1pt"); free(l1pt); return (-1); } vm->l1pt = l1pt; return 0; } /* from arm/pmap.c */ #define ARM_L1_IDX(va) ((va) >> ARM_L1_S_SHIFT) #define l1pte_section_p(pde) (((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S) #define l1pte_valid(pde) ((pde) != 0) #define l2pte_valid(pte) ((pte) != 0) #define l2pte_index(v) (((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT) static int _arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm = kd->vmst; arm_pd_entry_t pd; arm_pt_entry_t pte; arm_physaddr_t pte_pa; off_t pte_off; if (vm->l1pt == NULL) return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE)); pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]); if (!l1pte_valid(pd)) goto invalid; if (l1pte_section_p(pd)) { /* 1MB section mapping. */ *pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET); return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE)); } pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte); _kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE); if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_arm_kvatop: pread"); goto invalid; } pte = _kvm32toh(kd, pte); if (!l2pte_valid(pte)) { goto invalid; } if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { *pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET); return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE)); } *pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET); return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE)); invalid: _kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va); return 0; } /* * Machine-dependent initialization for ALL open kvm descriptors, * not just those for a kernel crash dump. Some architectures * have to deal with these NOT being constants! (i.e. 
m68k) */ #ifdef FBSD_NOT_YET int _kvm_mdopen(kvm_t *kd) { kd->usrstack = USRSTACK; kd->min_uva = VM_MIN_ADDRESS; kd->max_uva = VM_MAXUSER_ADDRESS; return (0); } #endif int +#ifdef __arm__ _arm_native(kvm_t *kd) +#else +_arm_native(kvm_t *kd __unused) +#endif { #ifdef __arm__ #if _BYTE_ORDER == _LITTLE_ENDIAN return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); #else return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); #endif #else return (0); #endif } -struct kvm_arch kvm_arm = { +static struct kvm_arch kvm_arm = { .ka_probe = _arm_probe, .ka_initvtop = _arm_initvtop, .ka_freevtop = _arm_freevtop, .ka_kvatop = _arm_kvatop, .ka_native = _arm_native, }; KVM_ARCH(kvm_arm); Index: stable/11/lib/libkvm/kvm_i386.c =================================================================== --- stable/11/lib/libkvm/kvm_i386.c (revision 316125) +++ stable/11/lib/libkvm/kvm_i386.c (revision 316126) @@ -1,430 +1,430 @@ /*- * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #if defined(LIBC_SCCS) && !defined(lint) #if 0 static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; #endif #endif /* LIBC_SCCS and not lint */ /* * i386 machine dependent routines for kvm. Hopefully, the forthcoming * vm code will one day obsolete this module. */ #include #include #include #include #include #include #include #ifdef __i386__ #include /* For KERNBASE. */ #endif #include #include "kvm_private.h" #include "kvm_i386.h" struct vmstate { void *PTD; int pae; size_t phnum; GElf_Phdr *phdr; }; /* * Translate a physical memory address to a file-offset in the crash-dump. 
*/ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { struct vmstate *vm = kd->vmst; GElf_Phdr *p; size_t n; if (kd->rawdump) { *ofs = pa; return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } p = vm->phdr; n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } static void _i386_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->PTD) free(vm->PTD); free(vm->phdr); free(vm); kd->vmst = NULL; } static int _i386_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && !_kvm_is_minidump(kd)); } static int _i386_initvtop(kvm_t *kd) { struct kvm_nlist nl[2]; i386_physaddr_t pa; kvaddr_t kernbase; char *PTD; int i; kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(struct vmstate)); if (kd->vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst->PTD = 0; if (kd->rawdump == 0) { if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { #ifdef __i386__ kernbase = KERNBASE; /* for old kernels */ #else _kvm_err(kd, kd->program, "cannot resolve kernbase"); return (-1); #endif } else kernbase = nl[0].n_value; nl[0].n_name = "IdlePDPT"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) == 0) { i386_physaddr_pae_t pa64; if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePDPT"); return (-1); } pa = le32toh(pa); PTD = _kvm_malloc(kd, 4 * I386_PAGE_SIZE); if (PTD == NULL) { _kvm_err(kd, kd->program, "cannot allocate PTD"); return (-1); } for (i = 0; i < 4; i++) { if (kvm_read2(kd, pa + (i * sizeof(pa64)), &pa64, sizeof(pa64)) != sizeof(pa64)) { _kvm_err(kd, kd->program, "Cannot read PDPT"); free(PTD); return (-1); } pa64 = le64toh(pa64); if (kvm_read2(kd, pa64 & I386_PG_FRAME_PAE, PTD + (i * 
I386_PAGE_SIZE), I386_PAGE_SIZE) != I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PDPT"); free(PTD); return (-1); } } kd->vmst->PTD = PTD; kd->vmst->pae = 1; } else { nl[0].n_name = "IdlePTD"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist"); return (-1); } if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePTD"); return (-1); } pa = le32toh(pa); PTD = _kvm_malloc(kd, I386_PAGE_SIZE); if (PTD == NULL) { _kvm_err(kd, kd->program, "cannot allocate PTD"); return (-1); } if (kvm_read2(kd, pa, PTD, I386_PAGE_SIZE) != I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PTD"); return (-1); } kd->vmst->PTD = PTD; kd->vmst->pae = 0; } return (0); } static int _i386_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_t offset; i386_physaddr_t pte_pa; i386_pde_t pde; i386_pte_t pte; kvaddr_t pdeindex; kvaddr_t pteindex; size_t s; i386_physaddr_t a; off_t ofs; i386_pde_t *PTD; vm = kd->vmst; PTD = (i386_pde_t *)vm->PTD; offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. */ if (PTD == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: bootstrap data not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); } pdeindex = va >> I386_PDRSHIFT; pde = le32toh(PTD[pdeindex]); if ((pde & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_vatop: pde not valid"); goto invalid; } if (pde & I386_PG_PS) { /* * No second-level page table; ptd describes one 4MB * page. (We assume that the kernel wouldn't set * PG_PS without enabling it cr0). 
*/ offset = va & I386_PAGE_PS_MASK; a = (pde & I386_PG_PS_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: 4MB page address not in dump"); goto invalid; } return (I386_NBPDR - offset); } pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG - 1); pte_pa = (pde & I386_PG_FRAME) + (pteindex * sizeof(pte)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_i386_vatop: pte_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_i386_vatop: pread"); goto invalid; } pte = le32toh(pte); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop: pte not valid"); goto invalid; } a = (pte & I386_PG_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: address not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_pae_t offset; i386_physaddr_pae_t pte_pa; i386_pde_pae_t pde; i386_pte_pae_t pte; kvaddr_t pdeindex; kvaddr_t pteindex; size_t s; i386_physaddr_pae_t a; off_t ofs; i386_pde_pae_t *PTD; vm = kd->vmst; PTD = (i386_pde_pae_t *)vm->PTD; offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. 
*/ if (PTD == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: bootstrap data not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); } pdeindex = va >> I386_PDRSHIFT_PAE; pde = le64toh(PTD[pdeindex]); if ((pde & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop_pae: pde not valid"); goto invalid; } if (pde & I386_PG_PS) { /* * No second-level page table; ptd describes one 2MB * page. (We assume that the kernel wouldn't set * PG_PS without enabling it cr0). */ offset = va & I386_PAGE_PS_MASK_PAE; a = (pde & I386_PG_PS_FRAME_PAE) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: 2MB page address not in dump"); goto invalid; } return (I386_NBPDR_PAE - offset); } pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG_PAE - 1); pte_pa = (pde & I386_PG_FRAME_PAE) + (pteindex * sizeof(pde)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_i386_vatop_pae: pdpe_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_i386_vatop_pae: read"); goto invalid; } pte = le64toh(pte); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: pte not valid"); goto invalid; } a = (pte & I386_PG_FRAME_PAE) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: address not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "vatop called in live kernel!"); return (0); } if (kd->vmst->pae) return (_i386_vatop_pae(kd, va, pa)); else return (_i386_vatop(kd, va, pa)); } int -_i386_native(kvm_t *kd) +_i386_native(kvm_t *kd __unused) { #ifdef __i386__ return (1); #else 
return (0); #endif } -struct kvm_arch kvm_i386 = { +static struct kvm_arch kvm_i386 = { .ka_probe = _i386_probe, .ka_initvtop = _i386_initvtop, .ka_freevtop = _i386_freevtop, .ka_kvatop = _i386_kvatop, .ka_native = _i386_native, }; KVM_ARCH(kvm_i386); Index: stable/11/lib/libkvm/kvm_minidump_aarch64.c =================================================================== --- stable/11/lib/libkvm/kvm_minidump_aarch64.c (revision 316125) +++ stable/11/lib/libkvm/kvm_minidump_aarch64.c (revision 316126) @@ -1,253 +1,253 @@ /*- * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_amd64.c r261799 */ #include __FBSDID("$FreeBSD$"); /* * ARM64 (AArch64) machine dependent routines for kvm and minidumps. 
*/ #include #include #include #include #include #include #include "../../sys/arm64/include/minidump.h" #include #include "kvm_private.h" #include "kvm_aarch64.h" #define aarch64_round_page(x) roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; struct hpt hpt; uint64_t *page_map; }; static int _aarch64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) && _kvm_is_minidump(kd)); } static void _aarch64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; _kvm_hpt_free(&vm->hpt); free(vm->page_map); free(vm); kd->vmst = NULL; } static int _aarch64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; uint64_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); /* Skip header and msgbuf */ off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize); bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); free(bitmap); return (-1); } off += aarch64_round_page(vmst->hdr.bitmapsize); vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize); if (vmst->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize); free(bitmap); return (-1); } /* This is the end of the dump, savecore may have truncated it. */ /* * XXX: This doesn't make sense. The pmap is not at the end, * and if it is truncated we don't have any actual data (it's * all stored after the bitmap and pmap. 
-- jhb */ if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) < AARCH64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize); free(bitmap); return (-1); } off += vmst->hdr.pmapsize; /* build physical address hash table for sparse pages */ _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, AARCH64_PAGE_SIZE, sizeof(*bitmap)); free(bitmap); return (0); } static int _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; aarch64_physaddr_t offset; aarch64_pte_t l3; kvaddr_t l3_index; aarch64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AARCH64_PAGE_MASK; if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & ~AARCH64_PAGE_MASK; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AARCH64_PAGE_SIZE - offset); } else if (va >= vm->hdr.kernbase) { l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT; if (l3_index >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; l3 = le64toh(vm->page_map[l3_index]); if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: pde not valid"); goto invalid; } a = l3 & ~AARCH64_ATTR_MASK; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AARCH64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_aarch64_minidump_kvatop called in live 
kernel!"); return (0); } return (_aarch64_minidump_vatop(kd, va, pa)); } static int -_aarch64_native(kvm_t *kd) +_aarch64_native(kvm_t *kd __unused) { #ifdef __aarch64__ return (1); #else return (0); #endif } -struct kvm_arch kvm_aarch64_minidump = { +static struct kvm_arch kvm_aarch64_minidump = { .ka_probe = _aarch64_minidump_probe, .ka_initvtop = _aarch64_minidump_initvtop, .ka_freevtop = _aarch64_minidump_freevtop, .ka_kvatop = _aarch64_minidump_kvatop, .ka_native = _aarch64_native, }; KVM_ARCH(kvm_aarch64_minidump); Index: stable/11/lib/libkvm/kvm_minidump_amd64.c =================================================================== --- stable/11/lib/libkvm/kvm_minidump_amd64.c (revision 316125) +++ stable/11/lib/libkvm/kvm_minidump_amd64.c (revision 316126) @@ -1,321 +1,321 @@ /*- * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AMD64 machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include "../../sys/amd64/include/minidump.h" #include #include "kvm_private.h" #include "kvm_amd64.h" #define amd64_round_page(x) roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; struct hpt hpt; amd64_pte_t *page_map; }; static int _amd64_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && _kvm_is_minidump(kd)); } static void _amd64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; _kvm_hpt_free(&vm->hpt); if (vm->page_map) free(vm->page_map); free(vm); kd->vmst = NULL; } static int _amd64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; uint64_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } /* * NB: amd64 minidump header is binary compatible between version 1 * and version 2; this may not be the case for the future versions. 
*/ vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); /* Skip header and msgbuf */ off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize); bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); free(bitmap); return (-1); } off += amd64_round_page(vmst->hdr.bitmapsize); vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize); if (vmst->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize); free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) != (ssize_t)vmst->hdr.pmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize); free(bitmap); return (-1); } off += vmst->hdr.pmapsize; /* build physical address hash table for sparse pages */ _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, AMD64_PAGE_SIZE, sizeof(*bitmap)); free(bitmap); return (0); } static int _amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; amd64_physaddr_t offset; amd64_pte_t pte; kvaddr_t pteindex; amd64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> 
AMD64_PAGE_SHIFT; if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; pte = le64toh(vm->page_map[pteindex]); if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: pte not valid"); goto invalid; } a = pte & AMD64_PG_FRAME; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { amd64_pte_t pt[AMD64_NPTEPG]; struct vmstate *vm; amd64_physaddr_t offset; amd64_pde_t pde; amd64_pte_t pte; kvaddr_t pteindex; kvaddr_t pdeindex; amd64_physaddr_t a; off_t ofs; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT; if (pdeindex >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; pde = le64toh(vm->page_map[pdeindex]); if ((pde & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: pde not valid"); goto invalid; } if ((pde & AMD64_PG_PS) == 0) { a = pde & AMD64_PG_FRAME; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: pt physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } /* TODO: Just read the single PTE */ if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) != 
AMD64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read %d bytes for page table", AMD64_PAGE_SIZE); return (-1); } pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1); pte = le64toh(pt[pteindex]); if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: pte not valid"); goto invalid; } a = pte & AMD64_PG_FRAME; } else { a = pde & AMD64_PG_PS_FRAME; a += (va & AMD64_PDRMASK) ^ offset; } ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: direct map address 0x%jx not in minidump", (uintmax_t)va); goto invalid; } *pa = ofs + offset; return (AMD64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_amd64_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_amd64_minidump_kvatop called in live kernel!"); return (0); } if (((struct vmstate *)kd->vmst)->hdr.version == 1) return (_amd64_minidump_vatop_v1(kd, va, pa)); else return (_amd64_minidump_vatop(kd, va, pa)); } -struct kvm_arch kvm_amd64_minidump = { +static struct kvm_arch kvm_amd64_minidump = { .ka_probe = _amd64_minidump_probe, .ka_initvtop = _amd64_minidump_initvtop, .ka_freevtop = _amd64_minidump_freevtop, .ka_kvatop = _amd64_minidump_kvatop, .ka_native = _amd64_native, }; KVM_ARCH(kvm_amd64_minidump); Index: stable/11/lib/libkvm/kvm_minidump_arm.c =================================================================== --- stable/11/lib/libkvm/kvm_minidump_arm.c (revision 316125) 
+++ stable/11/lib/libkvm/kvm_minidump_arm.c (revision 316126) @@ -1,237 +1,237 @@ /*- * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_i386.c,v 1.2 2006/06/05 08:51:14 */ #include __FBSDID("$FreeBSD$"); /* * ARM machine dependent routines for kvm and minidumps. 
*/ #include #include #include #include #include #include #include #include #include "../../sys/arm/include/minidump.h" #include "kvm_private.h" #include "kvm_arm.h" #define arm_round_page(x) roundup2((kvaddr_t)(x), ARM_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; struct hpt hpt; void *ptemap; unsigned char ei_data; }; static int _arm_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) && _kvm_is_minidump(kd)); } static void _arm_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } static int _arm_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase); vmst->hdr.arch = _kvm32toh(kd, vmst->hdr.arch); vmst->hdr.mmuformat = _kvm32toh(kd, vmst->hdr.mmuformat); if (vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_UNKNOWN) { /* This is a safe default as 1K pages are not used. 
*/ vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6; } /* Skip header and msgbuf */ off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize); bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "bitmap", vmst->hdr.bitmapsize); return (-1); } if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); free(bitmap); return (-1); } off += arm_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) != (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* Build physical address hash table for sparse pages */ _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, ARM_PAGE_SIZE, sizeof(*bitmap)); free(bitmap); return (0); } static int _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; arm_pt_entry_t pte; arm_physaddr_t offset, a; kvaddr_t pteindex; off_t ofs; arm_pt_entry_t *ptemap; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!"); return (0); } vm = kd->vmst; ptemap = vm->ptemap; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT; pte = _kvm32toh(kd, ptemap[pteindex]); if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not valid"); goto invalid; } if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { /* 64K page -> convert to be like 4K page */ offset = va & ARM_L2_S_OFFSET; a = (pte & ARM_L2_L_FRAME) + (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME); } else { if (kd->vmst->hdr.mmuformat == 
MINIDUMP_MMU_FORMAT_V4 && (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not supported"); goto invalid; } /* 4K page */ offset = va & ARM_L2_S_OFFSET; a = pte & ARM_L2_S_FRAME; } ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (ARM_PAGE_SIZE - offset); } else _kvm_err(kd, kd->program, "_arm_minidump_kvatop: virtual " "address 0x%jx not minidumped", (uintmax_t)va); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -struct kvm_arch kvm_arm_minidump = { +static struct kvm_arch kvm_arm_minidump = { .ka_probe = _arm_minidump_probe, .ka_initvtop = _arm_minidump_initvtop, .ka_freevtop = _arm_minidump_freevtop, .ka_kvatop = _arm_minidump_kvatop, .ka_native = _arm_native, }; KVM_ARCH(kvm_arm_minidump); Index: stable/11/lib/libkvm/kvm_minidump_i386.c =================================================================== --- stable/11/lib/libkvm/kvm_minidump_i386.c (revision 316125) +++ stable/11/lib/libkvm/kvm_minidump_i386.c (revision 316126) @@ -1,260 +1,260 @@ /*- * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * i386 machine dependent routines for kvm and minidumps. */ #include #include #include #include #include #include #include #include "../../sys/i386/include/minidump.h" #include #include "kvm_private.h" #include "kvm_i386.h" #define i386_round_page(x) roundup2((kvaddr_t)(x), I386_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; struct hpt hpt; void *ptemap; }; static int _i386_minidump_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && _kvm_is_minidump(kd)); } static void _i386_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } static int _i386_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize); vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase); vmst->hdr.paemode = le32toh(vmst->hdr.paemode); /* Skip header and msgbuf */ off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize); bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); free(bitmap); return (-1); } off += i386_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) != (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* build physical address hash table for sparse pages */ _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, I386_PAGE_SIZE, sizeof(*bitmap)); free(bitmap); return (0); } static int _i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_pae_t offset; i386_pte_pae_t pte; kvaddr_t pteindex; i386_physaddr_pae_t a; off_t ofs; i386_pte_pae_t *ptemap; vm = kd->vmst; ptemap = vm->ptemap; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; pte = le64toh(ptemap[pteindex]); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME_PAE; 
ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_t offset; i386_pte_t pte; kvaddr_t pteindex; i386_physaddr_t a; off_t ofs; i386_pte_t *ptemap; vm = kd->vmst; ptemap = vm->ptemap; offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; pte = le32toh(ptemap[pteindex]); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: pte not valid"); goto invalid; } a = pte & I386_PG_FRAME; ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_i386_minidump_vatop: physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (I386_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, "_i386_minidump_vatop: virtual address 0x%jx not minidumped", (uintmax_t)va); goto invalid; } invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!"); return (0); } if (kd->vmst->hdr.paemode) return (_i386_minidump_vatop_pae(kd, va, pa)); else return (_i386_minidump_vatop(kd, va, pa)); } -struct kvm_arch kvm_i386_minidump = { +static struct kvm_arch kvm_i386_minidump = { .ka_probe = _i386_minidump_probe, .ka_initvtop = _i386_minidump_initvtop, .ka_freevtop = _i386_minidump_freevtop, .ka_kvatop = _i386_minidump_kvatop, .ka_native = _i386_native, }; KVM_ARCH(kvm_i386_minidump); Index: 
stable/11/lib/libkvm/kvm_minidump_mips.c =================================================================== --- stable/11/lib/libkvm/kvm_minidump_mips.c (revision 316125) +++ stable/11/lib/libkvm/kvm_minidump_mips.c (revision 316126) @@ -1,295 +1,299 @@ /*- * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * Copyright (c) 2006 Peter Wemm * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: FreeBSD: src/lib/libkvm/kvm_minidump_arm.c r214223 */ #include __FBSDID("$FreeBSD$"); /* * MIPS machine dependent routines for kvm and minidumps. 
*/ #include #include #include #include #include #include #include #include "../../sys/mips/include/cpuregs.h" #include "../../sys/mips/include/minidump.h" #include "kvm_private.h" #include "kvm_mips.h" #define mips_round_page(x) roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE) struct vmstate { struct minidumphdr hdr; struct hpt hpt; void *ptemap; int pte_size; }; static int _mips_minidump_probe(kvm_t *kd) { if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 && kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); if (kd->nlehdr.e_machine != EM_MIPS) return (0); return (_kvm_is_minidump(kd)); } static void _mips_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } static int _mips_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); if (vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 || kd->nlehdr.e_flags & EF_MIPS_ABI2) vmst->pte_size = 64; else vmst->pte_size = 32; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); return (-1); } if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) { _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase); vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase); vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend); /* Skip header and msgbuf */ off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize); bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "bitmap", vmst->hdr.bitmapsize); return (-1); } if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); free(bitmap); return (-1); } off += mips_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) != (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* Build physical address hash table for sparse pages */ _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, MIPS_PAGE_SIZE, sizeof(*bitmap)); free(bitmap); return (0); } static int _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; uint64_t pte; mips_physaddr_t offset, a; kvaddr_t pteindex; off_t ofs; uint32_t *ptemap32; uint64_t *ptemap64; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!"); return (0); } offset = va & MIPS_PAGE_MASK; /* Operate with page-aligned address */ va &= ~MIPS_PAGE_MASK; vm = kd->vmst; ptemap32 = vm->ptemap; ptemap64 = vm->ptemap; 
if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) { a = va & MIPS_XKPHYS_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } else { if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT; if (vm->pte_size == 64) { pte = _kvm64toh(kd, ptemap64[pteindex]); a = MIPS64_PTE_TO_PA(pte); } else { pte = _kvm32toh(kd, ptemap32[pteindex]); a = MIPS32_PTE_TO_PA(pte); } if (!pte) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte " "not valid"); goto invalid; } } else { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual " "address 0x%jx not minidumped", (uintmax_t)va); return (0); } found: ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical " "address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (MIPS_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int +#ifdef __mips__ _mips_native(kvm_t *kd) +#else +_mips_native(kvm_t *kd __unused) +#endif { #ifdef __mips__ #ifdef __mips_n64 if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) return (0); #else if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32) return (0); #ifdef __mips_n32 if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2)) return (0); #else if (kd->nlehdr.e_flags & EF_MIPS_ABI2) return (0); #endif #endif #if _BYTE_ORDER == _LITTLE_ENDIAN return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); #else return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); #endif #else return (0); #endif } -struct kvm_arch kvm_mips_minidump = 
{ +static struct kvm_arch kvm_mips_minidump = { .ka_probe = _mips_minidump_probe, .ka_initvtop = _mips_minidump_initvtop, .ka_freevtop = _mips_minidump_freevtop, .ka_kvatop = _mips_minidump_kvatop, .ka_native = _mips_native, }; KVM_ARCH(kvm_mips_minidump); Index: stable/11/lib/libkvm/kvm_powerpc.c =================================================================== --- stable/11/lib/libkvm/kvm_powerpc.c (revision 316125) +++ stable/11/lib/libkvm/kvm_powerpc.c (revision 316126) @@ -1,235 +1,235 @@ /*- * Copyright (c) 2008, Juniper Networks, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "kvm_private.h" struct vmstate { void *map; size_t mapsz; size_t dmphdrsz; Elf32_Ehdr *eh; Elf32_Phdr *ph; }; static int valid_elf_header(Elf32_Ehdr *eh) { if (!IS_ELF(*eh)) return (0); if (eh->e_ident[EI_CLASS] != ELFCLASS32) return (0); if (eh->e_ident[EI_DATA] != ELFDATA2MSB) return (0); if (eh->e_ident[EI_VERSION] != EV_CURRENT) return (0); if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE) return (0); if (be16toh(eh->e_type) != ET_CORE) return (0); if (be16toh(eh->e_machine) != EM_PPC) return (0); /* Can't think of anything else to check... */ return (1); } static size_t dump_header_size(struct kerneldumpheader *dh) { if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0) return (0); if (strcmp(dh->architecture, "powerpc") != 0) return (0); /* That should do it... */ return (sizeof(*dh)); } /* * Map the ELF headers into the process' address space. We do this in two * steps: first the ELF header itself and using that information the whole * set of headers. */ static int powerpc_maphdrs(kvm_t *kd) { struct vmstate *vm; size_t mapsz; vm = kd->vmst; vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader); vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile"); return (-1); } vm->dmphdrsz = 0; vm->eh = vm->map; if (!valid_elf_header(vm->eh)) { /* * Hmmm, no ELF header. Maybe we still have a dump header. * This is normal when the core file wasn't created by * savecore(8), but instead was dumped over TFTP. We can * easily skip the dump header... */ vm->dmphdrsz = dump_header_size(vm->map); if (vm->dmphdrsz == 0) goto inval; vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); if (!valid_elf_header(vm->eh)) goto inval; } mapsz = be16toh(vm->eh->e_phentsize) * be16toh(vm->eh->e_phnum) + be32toh(vm->eh->e_phoff); munmap(vm->map, vm->mapsz); /* Map all headers. 
*/ vm->mapsz = vm->dmphdrsz + mapsz; vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile headers"); return (-1); } vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); vm->ph = (void *)((uintptr_t)vm->eh + be32toh(vm->eh->e_phoff)); return (0); inval: _kvm_err(kd, kd->program, "invalid corefile"); return (-1); } /* * Determine the offset within the corefile corresponding the virtual * address. Return the number of contiguous bytes in the corefile or * 0 when the virtual address is invalid. */ static size_t powerpc_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm = kd->vmst; Elf32_Phdr *ph; int nph; ph = vm->ph; nph = be16toh(vm->eh->e_phnum); while (nph && (va < be32toh(ph->p_vaddr) || va >= be32toh(ph->p_vaddr) + be32toh(ph->p_memsz))) { nph--; ph = (void *)((uintptr_t)ph + be16toh(vm->eh->e_phentsize)); } if (nph == 0) return (0); /* Segment found. Return file offset and range. 
*/ *ofs = vm->dmphdrsz + be32toh(ph->p_offset) + (va - be32toh(ph->p_vaddr)); return (be32toh(ph->p_memsz) - (va - be32toh(ph->p_vaddr))); } static void _powerpc_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->eh != MAP_FAILED) munmap(vm->eh, vm->mapsz); free(vm); kd->vmst = NULL; } static int _powerpc_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_PPC) && kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); } static int _powerpc_initvtop(kvm_t *kd) { kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); if (kd->vmst == NULL) return (-1); if (powerpc_maphdrs(kd) == -1) return (-1); return (0); } static int _powerpc_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm; vm = kd->vmst; if (be32toh(vm->ph->p_paddr) == 0xffffffff) return ((int)powerpc_va2off(kd, va, ofs)); _kvm_err(kd, kd->program, "Raw corefile not supported"); return (0); } static int -_powerpc_native(kvm_t *kd) +_powerpc_native(kvm_t *kd __unused) { #if defined(__powerpc__) && !defined(__powerpc64__) return (1); #else return (0); #endif } -struct kvm_arch kvm_powerpc = { +static struct kvm_arch kvm_powerpc = { .ka_probe = _powerpc_probe, .ka_initvtop = _powerpc_initvtop, .ka_freevtop = _powerpc_freevtop, .ka_kvatop = _powerpc_kvatop, .ka_native = _powerpc_native, }; KVM_ARCH(kvm_powerpc); Index: stable/11/lib/libkvm/kvm_powerpc64.c =================================================================== --- stable/11/lib/libkvm/kvm_powerpc64.c (revision 316125) +++ stable/11/lib/libkvm/kvm_powerpc64.c (revision 316126) @@ -1,236 +1,236 @@ /*- * Copyright (c) 2008, Juniper Networks, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "kvm_private.h" struct vmstate { void *map; size_t mapsz; size_t dmphdrsz; Elf64_Ehdr *eh; Elf64_Phdr *ph; }; static int valid_elf_header(Elf64_Ehdr *eh) { if (!IS_ELF(*eh)) return (0); if (eh->e_ident[EI_CLASS] != ELFCLASS64) return (0); if (eh->e_ident[EI_DATA] != ELFDATA2MSB) return (0); if (eh->e_ident[EI_VERSION] != EV_CURRENT) return (0); if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE) return (0); if (be16toh(eh->e_type) != ET_CORE) return (0); if (be16toh(eh->e_machine) != EM_PPC64) return (0); /* Can't think of anything else to check... 
*/ return (1); } static size_t dump_header_size(struct kerneldumpheader *dh) { if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0) return (0); if (strcmp(dh->architecture, "powerpc64") != 0) return (0); /* That should do it... */ return (sizeof(*dh)); } /* * Map the ELF headers into the process' address space. We do this in two * steps: first the ELF header itself and using that information the whole * set of headers. */ static int powerpc_maphdrs(kvm_t *kd) { struct vmstate *vm; size_t mapsz; vm = kd->vmst; vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader); vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile"); return (-1); } vm->dmphdrsz = 0; vm->eh = vm->map; if (!valid_elf_header(vm->eh)) { /* * Hmmm, no ELF header. Maybe we still have a dump header. * This is normal when the core file wasn't created by * savecore(8), but instead was dumped over TFTP. We can * easily skip the dump header... */ vm->dmphdrsz = dump_header_size(vm->map); if (vm->dmphdrsz == 0) goto inval; vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); if (!valid_elf_header(vm->eh)) goto inval; } mapsz = be16toh(vm->eh->e_phentsize) * be16toh(vm->eh->e_phnum) + be64toh(vm->eh->e_phoff); munmap(vm->map, vm->mapsz); /* Map all headers. */ vm->mapsz = vm->dmphdrsz + mapsz; vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile headers"); return (-1); } vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); vm->ph = (void *)((uintptr_t)vm->eh + (uintptr_t)be64toh(vm->eh->e_phoff)); return (0); inval: _kvm_err(kd, kd->program, "invalid corefile"); return (-1); } /* * Determine the offset within the corefile corresponding the virtual * address. Return the number of contiguous bytes in the corefile or * 0 when the virtual address is invalid. 
*/ static size_t powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm = kd->vmst; Elf64_Phdr *ph; int nph; ph = vm->ph; nph = be16toh(vm->eh->e_phnum); while (nph && (va < be64toh(ph->p_vaddr) || va >= be64toh(ph->p_vaddr) + be64toh(ph->p_memsz))) { nph--; ph = (void *)((uintptr_t)ph + be16toh(vm->eh->e_phentsize)); } if (nph == 0) return (0); /* Segment found. Return file offset and range. */ *ofs = vm->dmphdrsz + be64toh(ph->p_offset) + (va - be64toh(ph->p_vaddr)); return (be64toh(ph->p_memsz) - (va - be64toh(ph->p_vaddr))); } static void _powerpc64_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->eh != MAP_FAILED) munmap(vm->eh, vm->mapsz); free(vm); kd->vmst = NULL; } static int _powerpc64_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) && kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); } static int _powerpc64_initvtop(kvm_t *kd) { kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); if (kd->vmst == NULL) return (-1); if (powerpc_maphdrs(kd) == -1) return (-1); return (0); } static int _powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm; vm = kd->vmst; if (be64toh(vm->ph->p_paddr) == 0xffffffffffffffff) return ((int)powerpc64_va2off(kd, va, ofs)); _kvm_err(kd, kd->program, "Raw corefile not supported"); return (0); } static int -_powerpc64_native(kvm_t *kd) +_powerpc64_native(kvm_t *kd __unused) { #ifdef __powerpc64__ return (1); #else return (0); #endif } -struct kvm_arch kvm_powerpc64 = { +static struct kvm_arch kvm_powerpc64 = { .ka_probe = _powerpc64_probe, .ka_initvtop = _powerpc64_initvtop, .ka_freevtop = _powerpc64_freevtop, .ka_kvatop = _powerpc64_kvatop, .ka_native = _powerpc64_native, }; KVM_ARCH(kvm_powerpc64); Index: stable/11/lib/libkvm/kvm_sparc64.c =================================================================== --- stable/11/lib/libkvm/kvm_sparc64.c (revision 316125) +++ stable/11/lib/libkvm/kvm_sparc64.c (revision 316126) @@ -1,242 +1,242 @@ /*- * 
Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* * from: FreeBSD: src/lib/libkvm/kvm_i386.c,v 1.15 2001/10/10 17:48:43 */ #include __FBSDID("$FreeBSD$"); #if defined(LIBC_SCCS) && !defined(lint) #if 0 static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; #endif #endif /* LIBC_SCCS and not lint */ /* * sparc64 machine dependent routines for kvm. */ #include #include #include #include #include #include #include "../../sys/sparc64/include/kerneldump.h" #include "kvm_private.h" #include "kvm_sparc64.h" struct vmstate { off_t vm_tsb_off; uint64_t vm_tsb_mask; int vm_nregions; struct sparc64_dump_reg *vm_regions; }; static int _sparc64_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_SPARCV9)); } static void _sparc64_freevtop(kvm_t *kd) { free(kd->vmst->vm_regions); free(kd->vmst); kd->vmst = NULL; } static int _sparc64_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size) { /* XXX This has to be a raw file read, kvm_read is virtual. */ if (pread(kd->pmfd, buf, size, pos) != (ssize_t)size) { _kvm_syserr(kd, kd->program, "_sparc64_read_phys: pread"); return (0); } return (1); } static int _sparc64_reg_cmp(const void *a, const void *b) { const struct sparc64_dump_reg *ra, *rb; ra = a; rb = b; if (ra->dr_pa < rb->dr_pa) return (-1); else if (ra->dr_pa >= rb->dr_pa + rb->dr_size) return (1); else return (0); } #define KVM_OFF_NOTFOUND 0 static off_t _sparc64_find_off(struct vmstate *vm, uint64_t pa, uint64_t size) { struct sparc64_dump_reg *reg, key; vm_offset_t o; key.dr_pa = pa; reg = bsearch(&key, vm->vm_regions, vm->vm_nregions, sizeof(*vm->vm_regions), _sparc64_reg_cmp); if (reg == NULL) return (KVM_OFF_NOTFOUND); o = pa - reg->dr_pa; if (o + size > reg->dr_size) return (KVM_OFF_NOTFOUND); return (reg->dr_offs + o); } static int _sparc64_initvtop(kvm_t *kd) { struct sparc64_dump_hdr hdr; struct sparc64_dump_reg *regs; struct vmstate *vm; size_t regsz; int i; vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm)); if (vm == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return 
(-1); } kd->vmst = vm; if (!_sparc64_read_phys(kd, 0, &hdr, sizeof(hdr))) goto fail_vm; hdr.dh_hdr_size = be64toh(hdr.dh_hdr_size); hdr.dh_tsb_pa = be64toh(hdr.dh_tsb_pa); hdr.dh_tsb_size = be64toh(hdr.dh_tsb_size); hdr.dh_tsb_mask = be64toh(hdr.dh_tsb_mask); hdr.dh_nregions = be32toh(hdr.dh_nregions); regsz = hdr.dh_nregions * sizeof(*regs); regs = _kvm_malloc(kd, regsz); if (regs == NULL) { _kvm_err(kd, kd->program, "cannot allocate regions"); goto fail_vm; } if (!_sparc64_read_phys(kd, sizeof(hdr), regs, regsz)) goto fail_regs; for (i = 0; i < hdr.dh_nregions; i++) { regs[i].dr_pa = be64toh(regs[i].dr_pa); regs[i].dr_size = be64toh(regs[i].dr_size); regs[i].dr_offs = be64toh(regs[i].dr_offs); } qsort(regs, hdr.dh_nregions, sizeof(*regs), _sparc64_reg_cmp); vm->vm_tsb_mask = hdr.dh_tsb_mask; vm->vm_regions = regs; vm->vm_nregions = hdr.dh_nregions; vm->vm_tsb_off = _sparc64_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size); if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) { _kvm_err(kd, kd->program, "tsb not found in dump"); goto fail_regs; } return (0); fail_regs: free(regs); fail_vm: free(vm); return (-1); } static int _sparc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct sparc64_tte tte; off_t tte_off; kvaddr_t vpn; off_t pa_off; kvaddr_t pg_off; int rest; pg_off = va & SPARC64_PAGE_MASK; if (va >= SPARC64_MIN_DIRECT_ADDRESS) pa_off = SPARC64_DIRECT_TO_PHYS(va) & ~SPARC64_PAGE_MASK; else { vpn = va >> SPARC64_PAGE_SHIFT; tte_off = kd->vmst->vm_tsb_off + ((vpn & kd->vmst->vm_tsb_mask) << SPARC64_TTE_SHIFT); if (!_sparc64_read_phys(kd, tte_off, &tte, sizeof(tte))) goto invalid; tte.tte_vpn = be64toh(tte.tte_vpn); tte.tte_data = be64toh(tte.tte_data); if (!sparc64_tte_match(&tte, va)) goto invalid; pa_off = SPARC64_TTE_GET_PA(&tte); } rest = SPARC64_PAGE_SIZE - pg_off; pa_off = _sparc64_find_off(kd->vmst, pa_off, rest); if (pa_off == KVM_OFF_NOTFOUND) goto invalid; *pa = pa_off + pg_off; return (rest); invalid: _kvm_err(kd, 0, "invalid address (%jx)", (uintmax_t)va); 
return (0); } static int -_sparc64_native(kvm_t *kd) +_sparc64_native(kvm_t *kd __unused) { #ifdef __sparc64__ return (1); #else return (0); #endif } -struct kvm_arch kvm_sparc64 = { +static struct kvm_arch kvm_sparc64 = { .ka_probe = _sparc64_probe, .ka_initvtop = _sparc64_initvtop, .ka_freevtop = _sparc64_freevtop, .ka_kvatop = _sparc64_kvatop, .ka_native = _sparc64_native, }; KVM_ARCH(kvm_sparc64); Index: stable/11 =================================================================== --- stable/11 (revision 316125) +++ stable/11 (revision 316126) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r315697