Index: head/lib/libkvm/Makefile
===================================================================
--- head/lib/libkvm/Makefile	(revision 334052)
+++ head/lib/libkvm/Makefile	(revision 334053)
@@ -1,44 +1,44 @@
# @(#)Makefile 8.1 (Berkeley) 6/4/93
# $FreeBSD$

PACKAGE=lib${LIB}
LIB=	kvm
SHLIBDIR?= /lib
SHLIB_MAJOR=	7
-CFLAGS+=-DLIBC_SCCS -I${.CURDIR}
+CFLAGS+=-DNO__SCCSID -I${.CURDIR}

WARNS?=	6

SRCS=	kvm.c kvm_cptime.c kvm_getloadavg.c \
	kvm_getswapinfo.c kvm_pcpu.c kvm_private.c kvm_proc.c kvm_vnet.c \
	kvm_minidump_aarch64.c \
	kvm_amd64.c kvm_minidump_amd64.c \
	kvm_arm.c kvm_minidump_arm.c \
	kvm_i386.c kvm_minidump_i386.c \
	kvm_minidump_mips.c \
	kvm_powerpc.c kvm_powerpc64.c \
	kvm_sparc64.c
INCS=	kvm.h

LIBADD=	elf

MAN=	kvm.3 kvm_getcptime.3 kvm_geterr.3 kvm_getloadavg.3 \
	kvm_getpcpu.3 kvm_getprocs.3 kvm_getswapinfo.3 kvm_native.3 \
	kvm_nlist.3 kvm_open.3 kvm_read.3

MLINKS+=kvm_getpcpu.3 kvm_getmaxcpu.3 \
	kvm_getpcpu.3 kvm_dpcpu_setcpu.3 \
	kvm_getpcpu.3 kvm_read_zpcpu.3 \
	kvm_getpcpu.3 kvm_counter_u64_fetch.3
MLINKS+=kvm_getprocs.3 kvm_getargv.3 kvm_getprocs.3 kvm_getenvv.3
MLINKS+=kvm_nlist.3 kvm_nlist2.3
MLINKS+=kvm_open.3 kvm_close.3 kvm_open.3 kvm_open2.3 kvm_open.3 kvm_openfiles.3
MLINKS+=kvm_read.3 kvm_read2.3 kvm_read.3 kvm_write.3

.include <src.opts.mk>

HAS_TESTS=
SUBDIR.${MK_TESTS}= tests

.include <bsd.lib.mk>
Index: head/lib/libkvm/kvm.c
===================================================================
--- head/lib/libkvm/kvm.c	(revision 334052)
+++ head/lib/libkvm/kvm.c	(revision 334053)
@@ -1,506 +1,501 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
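/*
 * The substance of this revision: every hand-rolled SCCS-ID block in the
 * files below is collapsed into the __SCCSID() macro from <sys/cdefs.h>,
 * and the Makefile hunk above switches CFLAGS from -DLIBC_SCCS to
 * -DNO__SCCSID.  A before/after sketch of the idiom (illustrative only;
 * the real macro definition lives in <sys/cdefs.h>):
 *
 *	Before: guarded, and dead anyway because of the inner "#if 0":
 *
 *	#if defined(LIBC_SCCS) && !defined(lint)
 *	#if 0
 *	static char sccsid[] = "@(#)kvm.c 8.2 (Berkeley) 2/13/94";
 *	#endif
 *	#endif
 *
 *	After: one line; with NO__SCCSID defined (as CFLAGS now arranges)
 *	the macro is compiled out, so the ID strings still stay out of the
 *	object files:
 *
 *	__SCCSID("@(#)kvm.c 8.2 (Berkeley) 2/13/94");
 */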
*/ #include __FBSDID("$FreeBSD$"); - -#if defined(LIBC_SCCS) && !defined(lint) -#if 0 -static char sccsid[] = "@(#)kvm.c 8.2 (Berkeley) 2/13/94"; -#endif -#endif /* LIBC_SCCS and not lint */ +__SCCSID("@(#)kvm.c 8.2 (Berkeley) 2/13/94"); #include #include #define _WANT_VNET #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "kvm_private.h" SET_DECLARE(kvm_arch, struct kvm_arch); static char _kd_is_null[] = ""; char * kvm_geterr(kvm_t *kd) { if (kd == NULL) return (_kd_is_null); return (kd->errbuf); } static int _kvm_read_kernel_ehdr(kvm_t *kd) { Elf *elf; if (elf_version(EV_CURRENT) == EV_NONE) { _kvm_err(kd, kd->program, "Unsupported libelf"); return (-1); } elf = elf_begin(kd->nlfd, ELF_C_READ, NULL); if (elf == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); return (-1); } if (elf_kind(elf) != ELF_K_ELF) { _kvm_err(kd, kd->program, "kernel is not an ELF file"); return (-1); } if (gelf_getehdr(elf, &kd->nlehdr) == NULL) { _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); elf_end(elf); return (-1); } elf_end(elf); switch (kd->nlehdr.e_ident[EI_DATA]) { case ELFDATA2LSB: case ELFDATA2MSB: return (0); default: _kvm_err(kd, kd->program, "unsupported ELF data encoding for kernel"); return (-1); } } static kvm_t * _kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout) { struct kvm_arch **parch; struct stat st; kd->vmfd = -1; kd->pmfd = -1; kd->nlfd = -1; kd->vmst = NULL; kd->procbase = NULL; kd->argspc = NULL; kd->argv = NULL; if (uf == NULL) uf = getbootfile(); else if (strlen(uf) >= MAXPATHLEN) { _kvm_err(kd, kd->program, "exec file name too long"); goto failed; } if (flag & ~O_RDWR) { _kvm_err(kd, kd->program, "bad flags arg"); goto failed; } if (mf == NULL) mf = _PATH_MEM; if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) { _kvm_syserr(kd, kd->program, "%s", mf); goto failed; } if (fstat(kd->pmfd, &st) < 0) { _kvm_syserr(kd, kd->program, "%s", mf); goto failed; } if (S_ISREG(st.st_mode) && st.st_size <= 0) { errno = EINVAL; _kvm_syserr(kd, kd->program, "empty file"); goto failed; } if (S_ISCHR(st.st_mode)) { /* * If this is a character special device, then check that * it's /dev/mem. If so, open kmem too. (Maybe we should * make it work for either /dev/mem or /dev/kmem -- in either * case you're working with a live kernel.) */ if (strcmp(mf, _PATH_DEVNULL) == 0) { kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC); return (kd); } else if (strcmp(mf, _PATH_MEM) == 0) { if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) < 0) { _kvm_syserr(kd, kd->program, "%s", _PATH_KMEM); goto failed; } return (kd); } } /* * This is either a crash dump or a remote live system with its physical * memory fully accessible via a special device. * Open the namelist fd and determine the architecture. */ if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) { _kvm_syserr(kd, kd->program, "%s", uf); goto failed; } if (_kvm_read_kernel_ehdr(kd) < 0) goto failed; if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0 || strncmp(mf, _PATH_DEVVMM, strlen(_PATH_DEVVMM)) == 0) { kd->rawdump = 1; kd->writable = 1; } SET_FOREACH(parch, kvm_arch) { if ((*parch)->ka_probe(kd)) { kd->arch = *parch; break; } } if (kd->arch == NULL) { _kvm_err(kd, kd->program, "unsupported architecture"); goto failed; } /* * Non-native kernels require a symbol resolver. 
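/*
 * Usage sketch for the open/error path above: with NULL exec and core
 * paths, _kvm_open() falls back to getbootfile() and /dev/mem, i.e. the
 * running kernel.  Open failures are reported through the caller-supplied
 * buffer rather than kvm_geterr(), because no kvm_t exists yet when the
 * open itself fails.  Standalone illustration, not library code.
 */
#include <sys/param.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	printf("native: %d\n", kvm_native(kd));
	kvm_close(kd);
	return (0);
}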
*/ if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) { _kvm_err(kd, kd->program, "non-native kernel requires a symbol resolver"); goto failed; } /* * Initialize the virtual address translation machinery. */ if (kd->arch->ka_initvtop(kd) < 0) goto failed; return (kd); failed: /* * Copy out the error if doing sane error semantics. */ if (errout != NULL) strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX); (void)kvm_close(kd); return (NULL); } kvm_t * kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag, char *errout) { kvm_t *kd; if ((kd = calloc(1, sizeof(*kd))) == NULL) { if (errout != NULL) (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX); return (NULL); } return (_kvm_open(kd, uf, mf, flag, errout)); } kvm_t * kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag, const char *errstr) { kvm_t *kd; if ((kd = calloc(1, sizeof(*kd))) == NULL) { if (errstr != NULL) (void)fprintf(stderr, "%s: %s\n", errstr, strerror(errno)); return (NULL); } kd->program = errstr; return (_kvm_open(kd, uf, mf, flag, NULL)); } kvm_t * kvm_open2(const char *uf, const char *mf, int flag, char *errout, int (*resolver)(const char *, kvaddr_t *)) { kvm_t *kd; if ((kd = calloc(1, sizeof(*kd))) == NULL) { if (errout != NULL) (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX); return (NULL); } kd->resolve_symbol = resolver; return (_kvm_open(kd, uf, mf, flag, errout)); } int kvm_close(kvm_t *kd) { int error = 0; if (kd == NULL) { errno = EINVAL; return (-1); } if (kd->vmst != NULL) kd->arch->ka_freevtop(kd); if (kd->pmfd >= 0) error |= close(kd->pmfd); if (kd->vmfd >= 0) error |= close(kd->vmfd); if (kd->nlfd >= 0) error |= close(kd->nlfd); if (kd->procbase != 0) free((void *)kd->procbase); if (kd->argbuf != 0) free((void *) kd->argbuf); if (kd->argspc != 0) free((void *) kd->argspc); if (kd->argv != 0) free((void *)kd->argv); if (kd->pt_map != NULL) free(kd->pt_map); if (kd->page_map != NULL) free(kd->page_map); if (kd->sparse_map != MAP_FAILED) munmap(kd->sparse_map, kd->pt_sparse_size); free((void *)kd); return (error); } int kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl) { /* * If called via the public interface, permit initialization of * further virtualized modules on demand. */ return (_kvm_nlist(kd, nl, 1)); } int kvm_nlist(kvm_t *kd, struct nlist *nl) { struct kvm_nlist *kl; int count, i, nfail; /* * Avoid reporting truncated addresses by failing for non-native * cores. */ if (!kvm_native(kd)) { _kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore"); return (-1); } for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0'; count++) ; if (count == 0) return (0); kl = calloc(count + 1, sizeof(*kl)); for (i = 0; i < count; i++) kl[i].n_name = nl[i].n_name; nfail = kvm_nlist2(kd, kl); for (i = 0; i < count; i++) { nl[i].n_type = kl[i].n_type; nl[i].n_other = 0; nl[i].n_desc = 0; nl[i].n_value = kl[i].n_value; } return (nfail); } ssize_t kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len) { return (kvm_read2(kd, kva, buf, len)); } ssize_t kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len) { int cc; ssize_t cr; off_t pa; char *cp; if (ISALIVE(kd)) { /* * We're using /dev/kmem. Just read straight from the * device and let the active kernel do the address translation. 
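/*
 * Usage sketch for the kvm_nlist2()/kvm_read2() pairing the backends below
 * rely on: resolve a symbol to a kernel virtual address, then read its
 * value.  The symbol name "hz" is an assumption chosen for illustration.
 */
#include <sys/param.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kvm_nlist nl[2];
	kvm_t *kd;
	int hz;

	kd = kvm_open2(NULL, NULL, O_RDONLY, errbuf, NULL);
	if (kd == NULL) {
		fprintf(stderr, "kvm_open2: %s\n", errbuf);
		return (1);
	}
	memset(nl, 0, sizeof(nl));
	nl[0].n_name = "hz";
	nl[1].n_name = NULL;
	if (kvm_nlist2(kd, nl) != 0) {	/* 0 means every entry resolved */
		fprintf(stderr, "kvm_nlist2: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	if (kvm_read2(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
		fprintf(stderr, "kvm_read2: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	printf("hz = %d\n", hz);
	kvm_close(kd);
	return (0);
}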
*/ errno = 0; if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) { _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)kva); return (-1); } cr = read(kd->vmfd, buf, len); if (cr < 0) { _kvm_syserr(kd, 0, "kvm_read"); return (-1); } else if (cr < (ssize_t)len) _kvm_err(kd, kd->program, "short read"); return (cr); } cp = buf; while (len > 0) { cc = kd->arch->ka_kvatop(kd, kva, &pa); if (cc == 0) return (-1); if (cc > (ssize_t)len) cc = len; errno = 0; if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) { _kvm_syserr(kd, 0, _PATH_MEM); break; } cr = read(kd->pmfd, cp, cc); if (cr < 0) { _kvm_syserr(kd, kd->program, "kvm_read"); break; } /* * If ka_kvatop returns a bogus value or our core file is * truncated, we might wind up seeking beyond the end of the * core file in which case the read will return 0 (EOF). */ if (cr == 0) break; cp += cr; kva += cr; len -= cr; } return (cp - (char *)buf); } ssize_t kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len) { int cc; ssize_t cw; off_t pa; const char *cp; if (!ISALIVE(kd) && !kd->writable) { _kvm_err(kd, kd->program, "kvm_write not implemented for dead kernels"); return (-1); } if (ISALIVE(kd)) { /* * Just like kvm_read, only we write. */ errno = 0; if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) { _kvm_err(kd, 0, "invalid address (%lx)", kva); return (-1); } cc = write(kd->vmfd, buf, len); if (cc < 0) { _kvm_syserr(kd, 0, "kvm_write"); return (-1); } else if ((size_t)cc < len) _kvm_err(kd, kd->program, "short write"); return (cc); } cp = buf; while (len > 0) { cc = kd->arch->ka_kvatop(kd, kva, &pa); if (cc == 0) return (-1); if (cc > (ssize_t)len) cc = len; errno = 0; if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) { _kvm_syserr(kd, 0, _PATH_MEM); break; } cw = write(kd->pmfd, cp, cc); if (cw < 0) { _kvm_syserr(kd, kd->program, "kvm_write"); break; } /* * If ka_kvatop returns a bogus value or our core file is * truncated, we might wind up seeking beyond the end of the * core file in which case the read will return 0 (EOF). */ if (cw == 0) break; cp += cw; kva += cw; len -= cw; } return (cp - (const char *)buf); } int kvm_native(kvm_t *kd) { if (ISALIVE(kd)) return (1); return (kd->arch->ka_native(kd)); } int kvm_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *closure) { if (kd->arch->ka_walk_pages == NULL) return (0); return (kd->arch->ka_walk_pages(kd, cb, closure)); } Index: head/lib/libkvm/kvm_amd64.c =================================================================== --- head/lib/libkvm/kvm_amd64.c (revision 334052) +++ head/lib/libkvm/kvm_amd64.c (revision 334053) @@ -1,340 +1,335 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
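/*
 * A compact sketch (not the library's code) of the translate-and-copy loop
 * that kvm_read2() and kvm_write() share for dead kernels.  translate() is
 * a hypothetical stand-in for kd->arch->ka_kvatop(): it maps a kernel
 * virtual address to a file offset and returns how many bytes are valid
 * there before the next page boundary, or 0 if the address cannot be
 * translated.  Here it is just an identity map with 4 KiB pages so the
 * example runs against any regular file.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static size_t
translate(uint64_t va, off_t *ofs)
{
	*ofs = (off_t)va;
	return (4096 - (va & 4095));
}

static ssize_t
chunked_read(int fd, uint64_t va, void *buf, size_t len)
{
	char *cp = buf;
	ssize_t cr;
	size_t chunk;
	off_t ofs;

	while (len > 0) {
		chunk = translate(va, &ofs);
		if (chunk == 0)
			return (-1);	/* untranslatable address */
		if (chunk > len)
			chunk = len;
		cr = pread(fd, cp, chunk, ofs);
		if (cr <= 0)
			break;		/* I/O error or truncated core */
		cp += cr;
		va += cr;
		len -= cr;
	}
	return (cp - (char *)buf);
}

int
main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	if ((fd = open("/etc/hosts", O_RDONLY)) < 0)
		return (1);
	if ((n = chunked_read(fd, 0, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("%s\n", buf);
	}
	close(fd);
	return (0);
}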
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); - -#if defined(LIBC_SCCS) && !defined(lint) -#if 0 -static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; -#endif -#endif /* LIBC_SCCS and not lint */ +__SCCSID("@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"); /* * AMD64 machine dependent routines for kvm. Hopefully, the forthcoming * vm code will one day obsolete this module. */ #include #include #include #include #include #include #include #include #include #include "kvm_private.h" #include "kvm_amd64.h" struct vmstate { size_t phnum; GElf_Phdr *phdr; amd64_pml4e_t *PML4; }; /* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { struct vmstate *vm = kd->vmst; GElf_Phdr *p; size_t n; if (kd->rawdump) { *ofs = pa; return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } p = vm->phdr; n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } static void _amd64_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->PML4) free(vm->PML4); free(vm->phdr); free(vm); kd->vmst = NULL; } static int _amd64_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && !_kvm_is_minidump(kd)); } static int _amd64_initvtop(kvm_t *kd) { struct kvm_nlist nl[2]; amd64_physaddr_t pa; kvaddr_t kernbase; amd64_pml4e_t *PML4; kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); if (kd->vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst->PML4 = 0; if (kd->rawdump == 0) { if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no kernbase"); return (-1); } kernbase = nl[0].n_value; nl[0].n_name = "KPML4phys"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no KPML4phys"); return (-1); } if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); return (-1); } pa = le64toh(pa); PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE); if (PML4 == NULL) { _kvm_err(kd, kd->program, "cannot allocate PML4"); return (-1); } if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); free(PML4); return (-1); } kd->vmst->PML4 = PML4; return (0); } static int _amd64_vatop(kvm_t *kd, kvaddr_t va, off_t 
*pa) { struct vmstate *vm; amd64_physaddr_t offset; amd64_physaddr_t pdpe_pa; amd64_physaddr_t pde_pa; amd64_physaddr_t pte_pa; amd64_pml4e_t pml4e; amd64_pdpe_t pdpe; amd64_pde_t pde; amd64_pte_t pte; kvaddr_t pml4eindex; kvaddr_t pdpeindex; kvaddr_t pdeindex; kvaddr_t pteindex; amd64_physaddr_t a; off_t ofs; size_t s; vm = kd->vmst; offset = va & AMD64_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. */ if (vm->PML4 == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: bootstrap data not in dump"); goto invalid; } else return (AMD64_PAGE_SIZE - offset); } pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1); pml4e = le64toh(vm->PML4[pml4eindex]); if ((pml4e & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid"); goto invalid; } pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1); pdpe_pa = (pml4e & AMD64_PG_FRAME) + (pdpeindex * sizeof(amd64_pdpe_t)); s = _kvm_pa2off(kd, pdpe_pa, &ofs); if (s < sizeof(pdpe)) { _kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found"); goto invalid; } if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe"); goto invalid; } pdpe = le64toh(pdpe); if ((pdpe & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid"); goto invalid; } if (pdpe & AMD64_PG_PS) { /* * No next-level page table; pdpe describes one 1GB page. */ a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK); s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: 1GB page address not in dump"); goto invalid; } else return (AMD64_NBPDP - (va & AMD64_PDPMASK)); } pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1); pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t)); s = _kvm_pa2off(kd, pde_pa, &ofs); if (s < sizeof(pde)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found"); goto invalid; } if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read pde"); goto invalid; } pde = le64toh(pde); if ((pde & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pde not valid"); goto invalid; } if (pde & AMD64_PG_PS) { /* * No final-level page table; pde describes one 2MB page. 
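/*
 * The index arithmetic used by _amd64_vatop() above, spelled out with the
 * standard amd64 constants (9 bits per level, 512-entry tables, 4 KiB base
 * pages); illustrative only, the library uses the AMD64_* macros from
 * kvm_amd64.h.  A 1 GiB or 2 MiB PG_PS mapping stops the walk one or two
 * levels early, which is why those branches return AMD64_NBPDP or
 * AMD64_NBPDR minus the offset within the superpage.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t va = 0xffffffff80201234ULL;	/* arbitrary kernel VA */

	printf("pml4 index  %ju\n", (uintmax_t)((va >> 39) & 511));
	printf("pdp  index  %ju\n", (uintmax_t)((va >> 30) & 511));
	printf("pd   index  %ju\n", (uintmax_t)((va >> 21) & 511));
	printf("pt   index  %ju\n", (uintmax_t)((va >> 12) & 511));
	printf("page offset %ju\n", (uintmax_t)(va & 4095));
	return (0);
}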
*/ a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK); s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: 2MB page address not in dump"); goto invalid; } else return (AMD64_NBPDR - (va & AMD64_PDRMASK)); } pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1); pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found"); goto invalid; } if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_amd64_vatop: read"); goto invalid; } if ((pte & AMD64_PG_V) == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: pte not valid"); goto invalid; } a = (pte & AMD64_PG_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_amd64_vatop: address not in dump"); goto invalid; } else return (AMD64_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); return (0); } return (_amd64_vatop(kd, va, pa)); } int _amd64_native(kvm_t *kd __unused) { #ifdef __amd64__ return (1); #else return (0); #endif } static struct kvm_arch kvm_amd64 = { .ka_probe = _amd64_probe, .ka_initvtop = _amd64_initvtop, .ka_freevtop = _amd64_freevtop, .ka_kvatop = _amd64_kvatop, .ka_native = _amd64_native, }; KVM_ARCH(kvm_amd64); Index: head/lib/libkvm/kvm_getloadavg.c =================================================================== --- head/lib/libkvm/kvm_getloadavg.c (revision 334052) +++ head/lib/libkvm/kvm_getloadavg.c (revision 334053) @@ -1,107 +1,102 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
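/*
 * How a backend such as kvm_amd64 above reaches the probe loop in
 * _kvm_open(): each struct kvm_arch is placed in the "kvm_arch" linker set
 * and SET_FOREACH() walks the set until one probe accepts the image.  This
 * sketch assumes KVM_ARCH() is a DATA_SET() wrapper, consistent with the
 * SET_DECLARE()/SET_FOREACH() usage in kvm.c; the structure and names here
 * are local to the example, not libkvm's.
 */
#include <sys/param.h>
#include <sys/linker_set.h>
#include <stdio.h>

struct handler {
	const char *name;
	int (*probe)(void);
};

SET_DECLARE(demo_set, struct handler);

static int
always_match(void)
{
	return (1);
}

static struct handler demo_handler = {
	.name = "demo",
	.probe = always_match,
};
DATA_SET(demo_set, demo_handler);

int
main(void)
{
	struct handler **hp;

	SET_FOREACH(hp, demo_set) {
		if ((*hp)->probe()) {
			printf("selected backend: %s\n", (*hp)->name);
			break;
		}
	}
	return (0);
}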
*/ #include __FBSDID("$FreeBSD$"); - -#if defined(LIBC_SCCS) && !defined(lint) -#if 0 -static char sccsid[] = "@(#)kvm_getloadavg.c 8.1 (Berkeley) 6/4/93"; -#endif -#endif /* LIBC_SCCS and not lint */ +__SCCSID("@(#)kvm_getloadavg.c 8.1 (Berkeley) 6/4/93"); #include #include #include #include #include #include #include #include "kvm_private.h" static struct nlist nl[] = { { .n_name = "_averunnable" }, #define X_AVERUNNABLE 0 { .n_name = "_fscale" }, #define X_FSCALE 1 { .n_name = "" }, }; /* * kvm_getloadavg() -- Get system load averages, from live or dead kernels. * * Put `nelem' samples into `loadavg' array. * Return number of samples retrieved, or -1 on error. */ int kvm_getloadavg(kvm_t *kd, double loadavg[], int nelem) { struct loadavg loadinfo; struct nlist *p; int fscale, i; if (ISALIVE(kd)) return (getloadavg(loadavg, nelem)); if (!kd->arch->ka_native(kd)) { _kvm_err(kd, kd->program, "cannot read loadavg from non-native core"); return (-1); } if (kvm_nlist(kd, nl) != 0) { for (p = nl; p->n_type != 0; ++p); _kvm_err(kd, kd->program, "%s: no such symbol", p->n_name); return (-1); } #define KREAD(kd, addr, obj) \ (kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj)) if (KREAD(kd, nl[X_AVERUNNABLE].n_value, &loadinfo)) { _kvm_err(kd, kd->program, "can't read averunnable"); return (-1); } /* * Old kernels have fscale separately; if not found assume * running new format. */ if (!KREAD(kd, nl[X_FSCALE].n_value, &fscale)) loadinfo.fscale = fscale; nelem = MIN(nelem, (int)(sizeof(loadinfo.ldavg) / sizeof(fixpt_t))); for (i = 0; i < nelem; i++) loadavg[i] = (double) loadinfo.ldavg[i] / loadinfo.fscale; return (nelem); } Index: head/lib/libkvm/kvm_i386.c =================================================================== --- head/lib/libkvm/kvm_i386.c (revision 334052) +++ head/lib/libkvm/kvm_i386.c (revision 334053) @@ -1,433 +1,428 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
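/*
 * Usage sketch for kvm_getloadavg() above.  On a live system it simply
 * defers to getloadavg(3); for a dead kernel it reads the raw fixed-point
 * samples and divides by fscale, e.g. a raw sample of 3277 with fscale
 * 2048 comes out as roughly 1.60.
 */
#include <sys/param.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	double lavg[3];
	kvm_t *kd;
	int i, n;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	if ((n = kvm_getloadavg(kd, lavg, 3)) == -1)
		fprintf(stderr, "kvm_getloadavg: %s\n", kvm_geterr(kd));
	for (i = 0; i < n; i++)
		printf("%.2f%s", lavg[i], i == n - 1 ? "\n" : " ");
	kvm_close(kd);
	return (0);
}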
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); - -#if defined(LIBC_SCCS) && !defined(lint) -#if 0 -static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; -#endif -#endif /* LIBC_SCCS and not lint */ +__SCCSID("@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"); /* * i386 machine dependent routines for kvm. Hopefully, the forthcoming * vm code will one day obsolete this module. */ #include #include #include #include #include #include #include #include #ifdef __i386__ #include /* For KERNBASE. */ #endif #include #include "kvm_private.h" #include "kvm_i386.h" struct vmstate { void *PTD; int pae; size_t phnum; GElf_Phdr *phdr; }; /* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { struct vmstate *vm = kd->vmst; GElf_Phdr *p; size_t n; if (kd->rawdump) { *ofs = pa; return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } p = vm->phdr; n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } static void _i386_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; if (vm->PTD) free(vm->PTD); free(vm->phdr); free(vm); kd->vmst = NULL; } static int _i386_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && !_kvm_is_minidump(kd)); } static int _i386_initvtop(kvm_t *kd) { struct kvm_nlist nl[2]; i386_physaddr_t pa; kvaddr_t kernbase; char *PTD; int i; kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(struct vmstate)); if (kd->vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst->PTD = 0; if (kd->rawdump == 0) { if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { #ifdef __i386__ kernbase = KERNBASE; /* for old kernels */ #else _kvm_err(kd, kd->program, "cannot resolve kernbase"); return (-1); #endif } else kernbase = nl[0].n_value; nl[0].n_name = "IdlePDPT"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) == 0) { i386_physaddr_pae_t pa64; if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePDPT"); return (-1); } pa = le32toh(pa); PTD = _kvm_malloc(kd, 4 * I386_PAGE_SIZE); if (PTD == NULL) { _kvm_err(kd, kd->program, "cannot allocate PTD"); return (-1); } for (i = 0; i < 4; i++) { if (kvm_read2(kd, pa + (i * sizeof(pa64)), &pa64, sizeof(pa64)) != sizeof(pa64)) { _kvm_err(kd, kd->program, "Cannot read PDPT"); free(PTD); return (-1); } pa64 = le64toh(pa64); if (kvm_read2(kd, pa64 & I386_PG_FRAME_PAE, PTD + (i * I386_PAGE_SIZE), I386_PAGE_SIZE) != I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PDPT"); free(PTD); return (-1); } } kd->vmst->PTD = PTD; kd->vmst->pae = 1; } else { nl[0].n_name = "IdlePTD"; nl[1].n_name = 0; if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist"); return (-1); } if (kvm_read2(kd, (nl[0].n_value - 
kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePTD"); return (-1); } pa = le32toh(pa); PTD = _kvm_malloc(kd, I386_PAGE_SIZE); if (PTD == NULL) { _kvm_err(kd, kd->program, "cannot allocate PTD"); return (-1); } if (kvm_read2(kd, pa, PTD, I386_PAGE_SIZE) != I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PTD"); return (-1); } kd->vmst->PTD = PTD; kd->vmst->pae = 0; } return (0); } static int _i386_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_t offset; i386_physaddr_t pte_pa; i386_pde_t pde; i386_pte_t pte; kvaddr_t pdeindex; kvaddr_t pteindex; size_t s; i386_physaddr_t a; off_t ofs; i386_pde_t *PTD; vm = kd->vmst; PTD = (i386_pde_t *)vm->PTD; offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. */ if (PTD == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: bootstrap data not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); } pdeindex = va >> I386_PDRSHIFT; pde = le32toh(PTD[pdeindex]); if ((pde & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_vatop: pde not valid"); goto invalid; } if (pde & I386_PG_PS) { /* * No second-level page table; ptd describes one 4MB * page. (We assume that the kernel wouldn't set * PG_PS without enabling it cr0). */ offset = va & I386_PAGE_PS_MASK; a = (pde & I386_PG_PS_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: 4MB page address not in dump"); goto invalid; } return (I386_NBPDR - offset); } pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG - 1); pte_pa = (pde & I386_PG_FRAME) + (pteindex * sizeof(pte)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_i386_vatop: pte_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_i386_vatop: pread"); goto invalid; } pte = le32toh(pte); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop: pte not valid"); goto invalid; } a = (pte & I386_PG_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: address not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; i386_physaddr_pae_t offset; i386_physaddr_pae_t pte_pa; i386_pde_pae_t pde; i386_pte_pae_t pte; kvaddr_t pdeindex; kvaddr_t pteindex; size_t s; i386_physaddr_pae_t a; off_t ofs; i386_pde_pae_t *PTD; vm = kd->vmst; PTD = (i386_pde_pae_t *)vm->PTD; offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer * not yet set) then return pa == va to avoid infinite recursion. */ if (PTD == NULL) { s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: bootstrap data not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); } pdeindex = va >> I386_PDRSHIFT_PAE; pde = le64toh(PTD[pdeindex]); if ((pde & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop_pae: pde not valid"); goto invalid; } if (pde & I386_PG_PS) { /* * No second-level page table; ptd describes one 2MB * page. (We assume that the kernel wouldn't set * PG_PS without enabling it cr0). 
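/*
 * The corresponding i386 index arithmetic, for illustration (the library
 * uses the I386_* macros from kvm_i386.h).  Non-PAE splits the address
 * 10/10/12 with 4 MiB PG_PS superpages; PAE splits it 2/9/9/12 with 2 MiB
 * superpages, and because _i386_initvtop() reads the four PDPT-referenced
 * directory pages into one contiguous buffer, va >> 21 indexes that
 * concatenated 2048-entry array directly.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int va = 0xc0a01234u;		/* arbitrary kernel VA */

	/* non-PAE */
	printf("pde %u pte %u off %u\n",
	    va >> 22, (va >> 12) & 1023, va & 4095);
	/* PAE, over the concatenated 4-page directory */
	printf("pde %u pte %u off %u\n",
	    va >> 21, (va >> 12) & 511, va & 4095);
	return (0);
}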
*/ offset = va & I386_PAGE_PS_MASK_PAE; a = (pde & I386_PG_PS_FRAME_PAE) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop: 2MB page address not in dump"); goto invalid; } return (I386_NBPDR_PAE - offset); } pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG_PAE - 1); pte_pa = (pde & I386_PG_FRAME_PAE) + (pteindex * sizeof(pde)); s = _kvm_pa2off(kd, pte_pa, &ofs); if (s < sizeof(pte)) { _kvm_err(kd, kd->program, "_i386_vatop_pae: pdpe_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { _kvm_syserr(kd, kd->program, "_i386_vatop_pae: read"); goto invalid; } pte = le64toh(pte); if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: pte not valid"); goto invalid; } a = (pte & I386_PG_FRAME_PAE) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, "_i386_vatop_pae: address not in dump"); goto invalid; } else return (I386_PAGE_SIZE - offset); invalid: _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int _i386_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { _kvm_err(kd, 0, "vatop called in live kernel!"); return (0); } if (kd->vmst->pae) return (_i386_vatop_pae(kd, va, pa)); else return (_i386_vatop(kd, va, pa)); } int _i386_native(kvm_t *kd __unused) { #ifdef __i386__ return (1); #else return (0); #endif } static struct kvm_arch kvm_i386 = { .ka_probe = _i386_probe, .ka_initvtop = _i386_initvtop, .ka_freevtop = _i386_freevtop, .ka_kvatop = _i386_kvatop, .ka_native = _i386_native, }; KVM_ARCH(kvm_i386); Index: head/lib/libkvm/kvm_proc.c =================================================================== --- head/lib/libkvm/kvm_proc.c (revision 334052) +++ head/lib/libkvm/kvm_proc.c (revision 334053) @@ -1,747 +1,742 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#if 0 -#if defined(LIBC_SCCS) && !defined(lint) -static char sccsid[] = "@(#)kvm_proc.c 8.3 (Berkeley) 9/23/93"; -#endif /* LIBC_SCCS and not lint */ -#endif - #include __FBSDID("$FreeBSD$"); +__SCCSID("@(#)kvm_proc.c 8.3 (Berkeley) 9/23/93"); /* * Proc traversal interface for kvm. ps and w are (probably) the exclusive * users of this code, so we've factored it out into a separate module. * Thus, we keep this grunge out of the other kvm applications (i.e., * most other applications are interested only in open/close/read/nlist). */ #include #define _WANT_UCRED /* make ucred.h give us 'struct ucred' */ #include #include #include #include #include #include #include #include #define _WANT_PRISON /* make jail.h give us 'struct prison' */ #include #include #include #include #include #include #include #include #define _WANT_KW_EXITCODE #include #include #include #include #include #include #include #include #include #include #include "kvm_private.h" #define KREAD(kd, addr, obj) \ (kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj)) static int ticks; static int hz; static uint64_t cpu_tick_frequency; /* * From sys/kern/kern_tc.c. Depends on cpu_tick_frequency, which is * read/initialized before this function is ever called. */ static uint64_t cputick2usec(uint64_t tick) { if (cpu_tick_frequency == 0) return (0); if (tick > 18446744073709551) /* floor(2^64 / 1000) */ return (tick / (cpu_tick_frequency / 1000000)); else if (tick > 18446744073709) /* floor(2^64 / 1000000) */ return ((tick * 1000) / (cpu_tick_frequency / 1000)); else return ((tick * 1000000) / cpu_tick_frequency); } /* * Read proc's from memory file into buffer bp, which has space to hold * at most maxcnt procs. */ static int kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p, struct kinfo_proc *bp, int maxcnt) { int cnt = 0; struct kinfo_proc kinfo_proc, *kp; struct pgrp pgrp; struct session sess; struct cdev t_cdev; struct tty tty; struct vmspace vmspace; struct sigacts sigacts; #if 0 struct pstats pstats; #endif struct ucred ucred; struct prison pr; struct thread mtd; struct proc proc; struct proc pproc; struct sysentvec sysent; char svname[KI_EMULNAMELEN]; kp = &kinfo_proc; kp->ki_structsize = sizeof(kinfo_proc); /* * Loop on the processes. this is completely broken because we need to be * able to loop on the threads and merge the ones that are the same process some how. 
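/*
 * The three branches of cputick2usec() above all evaluate
 * tick * 1000000 / cpu_tick_frequency; they differ only in how the
 * multiply and divides are ordered so the 64-bit intermediate cannot
 * overflow: above 2^64/1000 ticks the frequency is pre-divided (coarsest),
 * between 2^64/1000000 and 2^64/1000 only a factor of 1000 is applied to
 * the tick, and below that the full-precision form is safe.  A
 * self-contained restatement with a worked call (the frequency is a
 * parameter here rather than the global read from the core):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
cputick2usec(uint64_t tick, uint64_t freq)
{
	if (freq == 0)
		return (0);
	if (tick > 18446744073709551ULL)	/* floor(2^64 / 1000) */
		return (tick / (freq / 1000000));
	else if (tick > 18446744073709ULL)	/* floor(2^64 / 1000000) */
		return ((tick * 1000) / (freq / 1000));
	else
		return ((tick * 1000000) / freq);
}

int
main(void)
{
	/* 2.4e9 ticks at 2.4 GHz is one second: prints 1000000. */
	printf("%ju\n", (uintmax_t)cputick2usec(2400000000ULL, 2400000000ULL));
	return (0);
}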
*/ for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) { memset(kp, 0, sizeof *kp); if (KREAD(kd, (u_long)p, &proc)) { _kvm_err(kd, kd->program, "can't read proc at %p", p); return (-1); } if (proc.p_state == PRS_NEW) continue; if (proc.p_state != PRS_ZOMBIE) { if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads), &mtd)) { _kvm_err(kd, kd->program, "can't read thread at %p", TAILQ_FIRST(&proc.p_threads)); return (-1); } } if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) { kp->ki_ruid = ucred.cr_ruid; kp->ki_svuid = ucred.cr_svuid; kp->ki_rgid = ucred.cr_rgid; kp->ki_svgid = ucred.cr_svgid; kp->ki_cr_flags = ucred.cr_flags; if (ucred.cr_ngroups > KI_NGROUPS) { kp->ki_ngroups = KI_NGROUPS; kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW; } else kp->ki_ngroups = ucred.cr_ngroups; kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups, kp->ki_ngroups * sizeof(gid_t)); kp->ki_uid = ucred.cr_uid; if (ucred.cr_prison != NULL) { if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) { _kvm_err(kd, kd->program, "can't read prison at %p", ucred.cr_prison); return (-1); } kp->ki_jid = pr.pr_id; } } switch(what & ~KERN_PROC_INC_THREAD) { case KERN_PROC_GID: if (kp->ki_groups[0] != (gid_t)arg) continue; break; case KERN_PROC_PID: if (proc.p_pid != (pid_t)arg) continue; break; case KERN_PROC_RGID: if (kp->ki_rgid != (gid_t)arg) continue; break; case KERN_PROC_UID: if (kp->ki_uid != (uid_t)arg) continue; break; case KERN_PROC_RUID: if (kp->ki_ruid != (uid_t)arg) continue; break; } /* * We're going to add another proc to the set. If this * will overflow the buffer, assume the reason is because * nprocs (or the proc list) is corrupt and declare an error. */ if (cnt >= maxcnt) { _kvm_err(kd, kd->program, "nprocs corrupt"); return (-1); } /* * gather kinfo_proc */ kp->ki_paddr = p; kp->ki_addr = 0; /* XXX uarea */ /* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */ kp->ki_args = proc.p_args; kp->ki_tracep = proc.p_tracevp; kp->ki_textvp = proc.p_textvp; kp->ki_fd = proc.p_fd; kp->ki_vmspace = proc.p_vmspace; if (proc.p_sigacts != NULL) { if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) { _kvm_err(kd, kd->program, "can't read sigacts at %p", proc.p_sigacts); return (-1); } kp->ki_sigignore = sigacts.ps_sigignore; kp->ki_sigcatch = sigacts.ps_sigcatch; } #if 0 if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) { if (KREAD(kd, (u_long)proc.p_stats, &pstats)) { _kvm_err(kd, kd->program, "can't read stats at %x", proc.p_stats); return (-1); } kp->ki_start = pstats.p_start; /* * XXX: The times here are probably zero and need * to be calculated from the raw data in p_rux and * p_crux. 
*/ kp->ki_rusage = pstats.p_ru; kp->ki_childstime = pstats.p_cru.ru_stime; kp->ki_childutime = pstats.p_cru.ru_utime; /* Some callers want child-times in a single value */ timeradd(&kp->ki_childstime, &kp->ki_childutime, &kp->ki_childtime); } #endif if (proc.p_oppid) kp->ki_ppid = proc.p_oppid; else if (proc.p_pptr) { if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) { _kvm_err(kd, kd->program, "can't read pproc at %p", proc.p_pptr); return (-1); } kp->ki_ppid = pproc.p_pid; } else kp->ki_ppid = 0; if (proc.p_pgrp == NULL) goto nopgrp; if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) { _kvm_err(kd, kd->program, "can't read pgrp at %p", proc.p_pgrp); return (-1); } kp->ki_pgid = pgrp.pg_id; kp->ki_jobc = pgrp.pg_jobc; if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) { _kvm_err(kd, kd->program, "can't read session at %p", pgrp.pg_session); return (-1); } kp->ki_sid = sess.s_sid; (void)memcpy(kp->ki_login, sess.s_login, sizeof(kp->ki_login)); kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0; if (sess.s_leader == p) kp->ki_kiflag |= KI_SLEADER; if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) { if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) { _kvm_err(kd, kd->program, "can't read tty at %p", sess.s_ttyp); return (-1); } if (tty.t_dev != NULL) { if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) { _kvm_err(kd, kd->program, "can't read cdev at %p", tty.t_dev); return (-1); } #if 0 kp->ki_tdev = t_cdev.si_udev; #else kp->ki_tdev = NODEV; #endif } if (tty.t_pgrp != NULL) { if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) { _kvm_err(kd, kd->program, "can't read tpgrp at %p", tty.t_pgrp); return (-1); } kp->ki_tpgid = pgrp.pg_id; } else kp->ki_tpgid = -1; if (tty.t_session != NULL) { if (KREAD(kd, (u_long)tty.t_session, &sess)) { _kvm_err(kd, kd->program, "can't read session at %p", tty.t_session); return (-1); } kp->ki_tsid = sess.s_sid; } } else { nopgrp: kp->ki_tdev = NODEV; } if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg) (void)kvm_read(kd, (u_long)mtd.td_wmesg, kp->ki_wmesg, WMESGLEN); (void)kvm_read(kd, (u_long)proc.p_vmspace, (char *)&vmspace, sizeof(vmspace)); kp->ki_size = vmspace.vm_map.size; /* * Approximate the kernel's method of calculating * this field. 
*/ #define pmap_resident_count(pm) ((pm)->pm_stats.resident_count) kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap); kp->ki_swrss = vmspace.vm_swrss; kp->ki_tsize = vmspace.vm_tsize; kp->ki_dsize = vmspace.vm_dsize; kp->ki_ssize = vmspace.vm_ssize; switch (what & ~KERN_PROC_INC_THREAD) { case KERN_PROC_PGRP: if (kp->ki_pgid != (pid_t)arg) continue; break; case KERN_PROC_SESSION: if (kp->ki_sid != (pid_t)arg) continue; break; case KERN_PROC_TTY: if ((proc.p_flag & P_CONTROLT) == 0 || kp->ki_tdev != (dev_t)arg) continue; break; } if (proc.p_comm[0] != 0) strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN); (void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent, sizeof(sysent)); (void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname, sizeof(svname)); if (svname[0] != 0) strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN); if ((proc.p_state != PRS_ZOMBIE) && (mtd.td_blocked != 0)) { kp->ki_kiflag |= KI_LOCKBLOCK; if (mtd.td_lockname) (void)kvm_read(kd, (u_long)mtd.td_lockname, kp->ki_lockname, LOCKNAMELEN); kp->ki_lockname[LOCKNAMELEN] = 0; } kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime); kp->ki_pid = proc.p_pid; kp->ki_siglist = proc.p_siglist; SIGSETOR(kp->ki_siglist, mtd.td_siglist); kp->ki_sigmask = mtd.td_sigmask; kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig); kp->ki_acflag = proc.p_acflag; kp->ki_lock = proc.p_lock; if (proc.p_state != PRS_ZOMBIE) { kp->ki_swtime = (ticks - proc.p_swtick) / hz; kp->ki_flag = proc.p_flag; kp->ki_sflag = 0; kp->ki_nice = proc.p_nice; kp->ki_traceflag = proc.p_traceflag; if (proc.p_state == PRS_NORMAL) { if (TD_ON_RUNQ(&mtd) || TD_CAN_RUN(&mtd) || TD_IS_RUNNING(&mtd)) { kp->ki_stat = SRUN; } else if (mtd.td_state == TDS_INHIBITED) { if (P_SHOULDSTOP(&proc)) { kp->ki_stat = SSTOP; } else if ( TD_IS_SLEEPING(&mtd)) { kp->ki_stat = SSLEEP; } else if (TD_ON_LOCK(&mtd)) { kp->ki_stat = SLOCK; } else { kp->ki_stat = SWAIT; } } } else { kp->ki_stat = SIDL; } /* Stuff from the thread */ kp->ki_pri.pri_level = mtd.td_priority; kp->ki_pri.pri_native = mtd.td_base_pri; kp->ki_lastcpu = mtd.td_lastcpu; kp->ki_wchan = mtd.td_wchan; kp->ki_oncpu = mtd.td_oncpu; if (mtd.td_name[0] != '\0') strlcpy(kp->ki_tdname, mtd.td_name, sizeof(kp->ki_tdname)); kp->ki_pctcpu = 0; kp->ki_rqindex = 0; /* * Note: legacy fields; wraps at NO_CPU_OLD or the * old max CPU value as appropriate */ if (mtd.td_lastcpu == NOCPU) kp->ki_lastcpu_old = NOCPU_OLD; else if (mtd.td_lastcpu > MAXCPU_OLD) kp->ki_lastcpu_old = MAXCPU_OLD; else kp->ki_lastcpu_old = mtd.td_lastcpu; if (mtd.td_oncpu == NOCPU) kp->ki_oncpu_old = NOCPU_OLD; else if (mtd.td_oncpu > MAXCPU_OLD) kp->ki_oncpu_old = MAXCPU_OLD; else kp->ki_oncpu_old = mtd.td_oncpu; } else { kp->ki_stat = SZOMB; } kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */ bcopy(&kinfo_proc, bp, sizeof(kinfo_proc)); ++bp; ++cnt; } return (cnt); } /* * Build proc info array by reading in proc list from a crash dump. * Return number of procs read. maxcnt is the max we will read. 
*/ static int kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc, u_long a_zombproc, int maxcnt) { struct kinfo_proc *bp = kd->procbase; int acnt, zcnt; struct proc *p; if (KREAD(kd, a_allproc, &p)) { _kvm_err(kd, kd->program, "cannot read allproc"); return (-1); } acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt); if (acnt < 0) return (acnt); if (KREAD(kd, a_zombproc, &p)) { _kvm_err(kd, kd->program, "cannot read zombproc"); return (-1); } zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt); if (zcnt < 0) zcnt = 0; return (acnt + zcnt); } struct kinfo_proc * kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt) { int mib[4], st, nprocs; size_t size, osize; int temp_op; if (kd->procbase != 0) { free((void *)kd->procbase); /* * Clear this pointer in case this call fails. Otherwise, * kvm_close() will free it again. */ kd->procbase = 0; } if (ISALIVE(kd)) { size = 0; mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = op; mib[3] = arg; temp_op = op & ~KERN_PROC_INC_THREAD; st = sysctl(mib, temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ? 3 : 4, NULL, &size, NULL, 0); if (st == -1) { _kvm_syserr(kd, kd->program, "kvm_getprocs"); return (0); } /* * We can't continue with a size of 0 because we pass * it to realloc() (via _kvm_realloc()), and passing 0 * to realloc() results in undefined behavior. */ if (size == 0) { /* * XXX: We should probably return an invalid, * but non-NULL, pointer here so any client * program trying to dereference it will * crash. However, _kvm_freeprocs() calls * free() on kd->procbase if it isn't NULL, * and free()'ing a junk pointer isn't good. * Then again, _kvm_freeprocs() isn't used * anywhere . . . */ kd->procbase = _kvm_malloc(kd, 1); goto liveout; } do { size += size / 10; kd->procbase = (struct kinfo_proc *) _kvm_realloc(kd, kd->procbase, size); if (kd->procbase == NULL) return (0); osize = size; st = sysctl(mib, temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ? 3 : 4, kd->procbase, &size, NULL, 0); } while (st == -1 && errno == ENOMEM && size == osize); if (st == -1) { _kvm_syserr(kd, kd->program, "kvm_getprocs"); return (0); } /* * We have to check the size again because sysctl() * may "round up" oldlenp if oldp is NULL; hence it * might've told us that there was data to get when * there really isn't any. */ if (size > 0 && kd->procbase->ki_structsize != sizeof(struct kinfo_proc)) { _kvm_err(kd, kd->program, "kinfo_proc size mismatch (expected %zu, got %d)", sizeof(struct kinfo_proc), kd->procbase->ki_structsize); return (0); } liveout: nprocs = size == 0 ? 
0 : size / kd->procbase->ki_structsize; } else { struct nlist nl[7], *p; nl[0].n_name = "_nprocs"; nl[1].n_name = "_allproc"; nl[2].n_name = "_zombproc"; nl[3].n_name = "_ticks"; nl[4].n_name = "_hz"; nl[5].n_name = "_cpu_tick_frequency"; nl[6].n_name = 0; if (!kd->arch->ka_native(kd)) { _kvm_err(kd, kd->program, "cannot read procs from non-native core"); return (0); } if (kvm_nlist(kd, nl) != 0) { for (p = nl; p->n_type != 0; ++p) ; _kvm_err(kd, kd->program, "%s: no such symbol", p->n_name); return (0); } if (KREAD(kd, nl[0].n_value, &nprocs)) { _kvm_err(kd, kd->program, "can't read nprocs"); return (0); } if (KREAD(kd, nl[3].n_value, &ticks)) { _kvm_err(kd, kd->program, "can't read ticks"); return (0); } if (KREAD(kd, nl[4].n_value, &hz)) { _kvm_err(kd, kd->program, "can't read hz"); return (0); } if (KREAD(kd, nl[5].n_value, &cpu_tick_frequency)) { _kvm_err(kd, kd->program, "can't read cpu_tick_frequency"); return (0); } size = nprocs * sizeof(struct kinfo_proc); kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size); if (kd->procbase == NULL) return (0); nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value, nl[2].n_value, nprocs); if (nprocs <= 0) { _kvm_freeprocs(kd); nprocs = 0; } #ifdef notdef else { size = nprocs * sizeof(struct kinfo_proc); kd->procbase = realloc(kd->procbase, size); } #endif } *cnt = nprocs; return (kd->procbase); } void _kvm_freeprocs(kvm_t *kd) { free(kd->procbase); kd->procbase = NULL; } void * _kvm_realloc(kvm_t *kd, void *p, size_t n) { void *np; np = reallocf(p, n); if (np == NULL) _kvm_err(kd, kd->program, "out of memory"); return (np); } /* * Get the command args or environment. */ static char ** kvm_argv(kvm_t *kd, const struct kinfo_proc *kp, int env, int nchr) { int oid[4]; int i; size_t bufsz; static int buflen; static char *buf, *p; static char **bufp; static int argc; char **nbufp; if (!ISALIVE(kd)) { _kvm_err(kd, kd->program, "cannot read user space from dead kernel"); return (NULL); } if (nchr == 0 || nchr > ARG_MAX) nchr = ARG_MAX; if (buflen == 0) { buf = malloc(nchr); if (buf == NULL) { _kvm_err(kd, kd->program, "cannot allocate memory"); return (NULL); } argc = 32; bufp = malloc(sizeof(char *) * argc); if (bufp == NULL) { free(buf); buf = NULL; _kvm_err(kd, kd->program, "cannot allocate memory"); return (NULL); } buflen = nchr; } else if (nchr > buflen) { p = realloc(buf, nchr); if (p != NULL) { buf = p; buflen = nchr; } } oid[0] = CTL_KERN; oid[1] = KERN_PROC; oid[2] = env ? KERN_PROC_ENV : KERN_PROC_ARGS; oid[3] = kp->ki_pid; bufsz = buflen; if (sysctl(oid, 4, buf, &bufsz, 0, 0) == -1) { /* * If the supplied buf is too short to hold the requested * value the sysctl returns with ENOMEM. The buf is filled * with the truncated value and the returned bufsz is equal * to the requested len. 
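/*
 * Usage sketch for the live branch of kvm_getprocs() above: opening the
 * core as _PATH_DEVNULL (the special case in _kvm_open()) yields a handle
 * that answers process queries via sysctl, which is how ps(1)-style tools
 * typically use libkvm.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int cnt, i;

	kd = kvm_openfiles(NULL, _PATH_DEVNULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
	if (kp == NULL) {
		fprintf(stderr, "kvm_getprocs: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	for (i = 0; i < cnt; i++)
		printf("%5d %s\n", (int)kp[i].ki_pid, kp[i].ki_comm);
	kvm_close(kd);
	return (0);
}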
*/ if (errno != ENOMEM || bufsz != (size_t)buflen) return (NULL); buf[bufsz - 1] = '\0'; errno = 0; } else if (bufsz == 0) return (NULL); i = 0; p = buf; do { bufp[i++] = p; p += strlen(p) + 1; if (i >= argc) { argc += argc; nbufp = realloc(bufp, sizeof(char *) * argc); if (nbufp == NULL) return (NULL); bufp = nbufp; } } while (p < buf + bufsz); bufp[i++] = 0; return (bufp); } char ** kvm_getargv(kvm_t *kd, const struct kinfo_proc *kp, int nchr) { return (kvm_argv(kd, kp, 0, nchr)); } char ** kvm_getenvv(kvm_t *kd, const struct kinfo_proc *kp, int nchr) { return (kvm_argv(kd, kp, 1, nchr)); } Index: head/lib/libkvm/kvm_sparc64.c =================================================================== --- head/lib/libkvm/kvm_sparc64.c (revision 334052) +++ head/lib/libkvm/kvm_sparc64.c (revision 334053) @@ -1,244 +1,239 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1989, 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software developed by the Computer Systems * Engineering group at Lawrence Berkeley Laboratory under DARPA contract * BG 91-66 and contributed to Berkeley. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: src/lib/libkvm/kvm_i386.c,v 1.15 2001/10/10 17:48:43 */ #include __FBSDID("$FreeBSD$"); - -#if defined(LIBC_SCCS) && !defined(lint) -#if 0 -static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; -#endif -#endif /* LIBC_SCCS and not lint */ +__SCCSID("@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"); /* * sparc64 machine dependent routines for kvm. 
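/*
 * Sketch of the vector-building step at the end of kvm_argv() above: the
 * KERN_PROC_ARGS/KERN_PROC_ENV sysctl returns the strings back to back,
 * NUL-separated, and the library turns them into a NULL-terminated pointer
 * vector.  Standalone illustration with a canned buffer.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char buf[] = "ls\0-l\0/tmp";		/* as the sysctl might return */
	size_t bufsz = sizeof(buf);		/* includes the final NUL */
	char **vec, **nvec;
	char *p = buf;
	int i = 0, cap = 4;

	if ((vec = malloc(cap * sizeof(*vec))) == NULL)
		return (1);
	while (p < buf + bufsz) {
		if (i + 1 >= cap) {
			cap *= 2;
			if ((nvec = realloc(vec, cap * sizeof(*vec))) == NULL)
				break;
			vec = nvec;
		}
		vec[i++] = p;
		p += strlen(p) + 1;
	}
	vec[i] = NULL;
	for (i = 0; vec[i] != NULL; i++)
		printf("argv[%d] = %s\n", i, vec[i]);
	free(vec);
	return (0);
}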
*/ #include #include #include #include #include #include #include "../../sys/sparc64/include/kerneldump.h" #include "kvm_private.h" #include "kvm_sparc64.h" struct vmstate { off_t vm_tsb_off; uint64_t vm_tsb_mask; int vm_nregions; struct sparc64_dump_reg *vm_regions; }; static int _sparc64_probe(kvm_t *kd) { return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_SPARCV9)); } static void _sparc64_freevtop(kvm_t *kd) { free(kd->vmst->vm_regions); free(kd->vmst); kd->vmst = NULL; } static int _sparc64_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size) { /* XXX This has to be a raw file read, kvm_read is virtual. */ if (pread(kd->pmfd, buf, size, pos) != (ssize_t)size) { _kvm_syserr(kd, kd->program, "_sparc64_read_phys: pread"); return (0); } return (1); } static int _sparc64_reg_cmp(const void *a, const void *b) { const struct sparc64_dump_reg *ra, *rb; ra = a; rb = b; if (ra->dr_pa < rb->dr_pa) return (-1); else if (ra->dr_pa >= rb->dr_pa + rb->dr_size) return (1); else return (0); } #define KVM_OFF_NOTFOUND 0 static off_t _sparc64_find_off(struct vmstate *vm, uint64_t pa, uint64_t size) { struct sparc64_dump_reg *reg, key; vm_offset_t o; key.dr_pa = pa; reg = bsearch(&key, vm->vm_regions, vm->vm_nregions, sizeof(*vm->vm_regions), _sparc64_reg_cmp); if (reg == NULL) return (KVM_OFF_NOTFOUND); o = pa - reg->dr_pa; if (o + size > reg->dr_size) return (KVM_OFF_NOTFOUND); return (reg->dr_offs + o); } static int _sparc64_initvtop(kvm_t *kd) { struct sparc64_dump_hdr hdr; struct sparc64_dump_reg *regs; struct vmstate *vm; size_t regsz; int i; vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm)); if (vm == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst = vm; if (!_sparc64_read_phys(kd, 0, &hdr, sizeof(hdr))) goto fail_vm; hdr.dh_hdr_size = be64toh(hdr.dh_hdr_size); hdr.dh_tsb_pa = be64toh(hdr.dh_tsb_pa); hdr.dh_tsb_size = be64toh(hdr.dh_tsb_size); hdr.dh_tsb_mask = be64toh(hdr.dh_tsb_mask); hdr.dh_nregions = be32toh(hdr.dh_nregions); regsz = hdr.dh_nregions * sizeof(*regs); regs = _kvm_malloc(kd, regsz); if (regs == NULL) { _kvm_err(kd, kd->program, "cannot allocate regions"); goto fail_vm; } if (!_sparc64_read_phys(kd, sizeof(hdr), regs, regsz)) goto fail_regs; for (i = 0; i < hdr.dh_nregions; i++) { regs[i].dr_pa = be64toh(regs[i].dr_pa); regs[i].dr_size = be64toh(regs[i].dr_size); regs[i].dr_offs = be64toh(regs[i].dr_offs); } qsort(regs, hdr.dh_nregions, sizeof(*regs), _sparc64_reg_cmp); vm->vm_tsb_mask = hdr.dh_tsb_mask; vm->vm_regions = regs; vm->vm_nregions = hdr.dh_nregions; vm->vm_tsb_off = _sparc64_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size); if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) { _kvm_err(kd, kd->program, "tsb not found in dump"); goto fail_regs; } return (0); fail_regs: free(regs); fail_vm: free(vm); return (-1); } static int _sparc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct sparc64_tte tte; off_t tte_off; kvaddr_t vpn; off_t pa_off; kvaddr_t pg_off; int rest; pg_off = va & SPARC64_PAGE_MASK; if (va >= SPARC64_MIN_DIRECT_ADDRESS) pa_off = SPARC64_DIRECT_TO_PHYS(va) & ~SPARC64_PAGE_MASK; else { vpn = va >> SPARC64_PAGE_SHIFT; tte_off = kd->vmst->vm_tsb_off + ((vpn & kd->vmst->vm_tsb_mask) << SPARC64_TTE_SHIFT); if (!_sparc64_read_phys(kd, tte_off, &tte, sizeof(tte))) goto invalid; tte.tte_vpn = be64toh(tte.tte_vpn); tte.tte_data = be64toh(tte.tte_data); if (!sparc64_tte_match(&tte, va)) goto invalid; pa_off = SPARC64_TTE_GET_PA(&tte); } rest = SPARC64_PAGE_SIZE - pg_off; pa_off = _sparc64_find_off(kd->vmst, pa_off, rest); if (pa_off == 
KVM_OFF_NOTFOUND) goto invalid; *pa = pa_off + pg_off; return (rest); invalid: _kvm_err(kd, 0, "invalid address (%jx)", (uintmax_t)va); return (0); } static int _sparc64_native(kvm_t *kd __unused) { #ifdef __sparc64__ return (1); #else return (0); #endif } static struct kvm_arch kvm_sparc64 = { .ka_probe = _sparc64_probe, .ka_initvtop = _sparc64_initvtop, .ka_freevtop = _sparc64_freevtop, .ka_kvatop = _sparc64_kvatop, .ka_native = _sparc64_native, }; KVM_ARCH(kvm_sparc64);
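/*
 * Sketch of the region lookup behind _sparc64_find_off() above: the dump
 * header lists disjoint physical regions sorted by base address, and a
 * bsearch() with a "does pa fall inside this region" comparator maps a
 * physical address to its file offset.  The struct and names here are
 * local to the example (they mirror sparc64_dump_reg), and it returns -1
 * on failure instead of the library's KVM_OFF_NOTFOUND sentinel.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reg {
	uint64_t pa;		/* physical base of the region */
	uint64_t size;		/* region length in bytes */
	uint64_t offs;		/* file offset of the region in the dump */
};

static int
reg_cmp(const void *a, const void *b)
{
	const struct reg *key = a, *r = b;

	if (key->pa < r->pa)
		return (-1);
	if (key->pa >= r->pa + r->size)
		return (1);
	return (0);		/* key falls inside this region */
}

static int64_t
find_off(struct reg *regs, size_t n, uint64_t pa, uint64_t len)
{
	struct reg key = { .pa = pa };
	struct reg *r;

	r = bsearch(&key, regs, n, sizeof(*regs), reg_cmp);
	if (r == NULL || pa - r->pa + len > r->size)
		return (-1);	/* not present (or split) in the dump */
	return ((int64_t)(r->offs + (pa - r->pa)));
}

int
main(void)
{
	struct reg regs[] = {
		{ 0x00000000, 0x4000000, 0x10000 },
		{ 0x10000000, 0x2000000, 0x4010000 },
	};

	printf("%jd\n", (intmax_t)find_off(regs, 2, 0x10001000, 8));
	return (0);
}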