Index: Makefile.inc1 =================================================================== --- Makefile.inc1 +++ Makefile.inc1 @@ -1819,6 +1819,7 @@ .endif lib/libgeom__L: lib/libexpat__L +lib/libkvm__L: lib/libelf__L .if ${MK_LIBTHR} != "no" _lib_libthr= lib/libthr Index: gnu/usr.bin/gdb/kgdb/Makefile =================================================================== --- gnu/usr.bin/gdb/kgdb/Makefile +++ gnu/usr.bin/gdb/kgdb/Makefile @@ -9,11 +9,7 @@ GDBLIBS= ${OBJ_GDB}/libgdb/libgdb.a DPADD= ${GDBLIBS} ${BULIBS} ${LIBKVM} -LDADD= ${GDBLIBS} ${BULIBS} -lkvm${GDB_SUFFIX} +LDADD= ${GDBLIBS} ${BULIBS} -lkvm LIBADD+= m readline ncursesw gnuregex -.if defined(GDB_CROSS_DEBUGGER) -CFLAGS+= -Wl,-export-dynamic -.endif - .include <bsd.prog.mk> Index: gnu/usr.bin/gdb/kgdb/main.c =================================================================== --- gnu/usr.bin/gdb/kgdb/main.c +++ gnu/usr.bin/gdb/kgdb/main.c @@ -41,9 +41,6 @@ #include #include #include -#ifdef CROSS_DEBUGGER -#include <proc_service.h> -#endif #include #include #include @@ -81,24 +78,6 @@ static void (*kgdb_new_objfile_chain)(struct objfile * objfile); -#ifdef CROSS_DEBUGGER -ps_err_e -ps_pglobal_lookup(struct ps_prochandle *ph, const char *obj, const char *name, - psaddr_t *sym_addr) -{ - struct minimal_symbol *ms; - CORE_ADDR addr; - - ms = lookup_minimal_symbol (name, NULL, NULL); - if (ms == NULL) - return PS_NOSYM; - - addr = SYMBOL_VALUE_ADDRESS (ms); - store_typed_address(sym_addr, builtin_type_void_data_ptr, addr); - return PS_OK; -} -#endif - static void usage(void) { Index: gnu/usr.bin/gdb/kgdb/trgt.c =================================================================== --- gnu/usr.bin/gdb/kgdb/trgt.c +++ gnu/usr.bin/gdb/kgdb/trgt.c @@ -78,6 +78,19 @@ #define KERNOFF (kgdb_kernbase ()) #define PINKERNEL(x) ((x) >= KERNOFF) +static int +kgdb_resolve_symbol(const char *name, kvaddr_t *kva) +{ + struct minimal_symbol *ms; + + ms = lookup_minimal_symbol (name, NULL, NULL); + if (ms == NULL) + return (1); + + *kva = SYMBOL_VALUE_ADDRESS (ms); + return (0); +} + static CORE_ADDR kgdb_kernbase (void) { @@ -120,8 +133,8 @@ old_chain = make_cleanup (xfree, filename); - nkvm = kvm_openfiles(bfd_get_filename(exec_bfd), filename, NULL, - write_files ? O_RDWR : O_RDONLY, kvm_err); + nkvm = kvm_open2(bfd_get_filename(exec_bfd), filename, + write_files ?
O_RDWR : O_RDONLY, kvm_err, kgdb_resolve_symbol); if (nkvm == NULL) error ("Failed to open vmcore: %s", kvm_err); @@ -254,7 +267,7 @@ if (len == 0) return (0); if (!write) - return (kvm_read(kvm, memaddr, myaddr, len)); + return (kvm_read2(kvm, memaddr, myaddr, len)); else return (kvm_write(kvm, memaddr, myaddr, len)); } Index: lib/libkvm/Makefile =================================================================== --- lib/libkvm/Makefile +++ lib/libkvm/Makefile @@ -1,50 +1,39 @@ # @(#)Makefile 8.1 (Berkeley) 6/4/93 # $FreeBSD$ -.if defined(TARGET_ARCH) && !defined(COMPAT_32BIT) -KVM_XARCH=${TARGET_ARCH} -KVM_XCPUARCH=${KVM_XARCH:C/mips(n32|64)?(el)?/mips/:C/arm(v6)?(eb|hf)?/arm/:C/powerpc64/powerpc/} -.else -KVM_XARCH=${MACHINE_ARCH} -KVM_XCPUARCH=${MACHINE_CPUARCH} -.endif - -.if ${KVM_XARCH} != ${MACHINE_ARCH} -LIB= kvm-${KVM_XARCH} -CFLAGS+=-DCROSS_LIBKVM -.else LIB= kvm -.endif SHLIBDIR?= /lib SHLIB_MAJOR= 6 CFLAGS+=-DLIBC_SCCS -I${.CURDIR} -.if exists(${.CURDIR}/kvm_${KVM_XARCH}.c) -KVM_ARCH=${KVM_XARCH} -.else -KVM_ARCH=${KVM_XCPUARCH} -.endif - WARNS?= 3 -SRCS= kvm.c kvm_${KVM_ARCH}.c kvm_cptime.c kvm_file.c kvm_getloadavg.c \ - kvm_getswapinfo.c kvm_pcpu.c kvm_proc.c kvm_vnet.c -.if exists(${.CURDIR}/kvm_minidump_${KVM_ARCH}.c) -SRCS+= kvm_minidump_${KVM_ARCH}.c -.endif +# XXX: Unused and to be removed: kvm_aarch64.c kvm_mips.c kvm_sparc.c +SRCS= kvm.c kvm_cptime.c kvm_file.c kvm_getloadavg.c \ + kvm_getswapinfo.c kvm_pcpu.c kvm_proc.c kvm_vnet.c \ + kvm_minidump_aarch64.c \ + kvm_amd64.c kvm_minidump_amd64.c \ + kvm_arm.c kvm_minidump_arm.c \ + kvm_i386.c kvm_minidump_i386.c \ + kvm_minidump_mips.c \ + kvm_powerpc.c kvm_powerpc64.c \ + kvm_sparc64.c INCS= kvm.h +LIBADD= elf + MAN= kvm.3 kvm_getcptime.3 kvm_geterr.3 kvm_getfiles.3 kvm_getloadavg.3 \ - kvm_getpcpu.3 kvm_getprocs.3 kvm_getswapinfo.3 kvm_nlist.3 kvm_open.3 \ - kvm_read.3 + kvm_getpcpu.3 kvm_getprocs.3 kvm_getswapinfo.3 kvm_native.3 \ + kvm_nlist.3 kvm_open.3 kvm_read.3 MLINKS+=kvm_getpcpu.3 kvm_getmaxcpu.3 \ kvm_getpcpu.3 kvm_dpcpu_setcpu.3 \ kvm_getpcpu.3 kvm_read_zpcpu.3 \ kvm_getpcpu.3 kvm_counter_u64_fetch.3 MLINKS+=kvm_getprocs.3 kvm_getargv.3 kvm_getprocs.3 kvm_getenvv.3 -MLINKS+=kvm_open.3 kvm_close.3 kvm_open.3 kvm_openfiles.3 -MLINKS+=kvm_read.3 kvm_write.3 +MLINKS+=kvm_nlist.3 kvm_nlist2.3 +MLINKS+=kvm_open.3 kvm_close.3 kvm_open.3 kvm_open2.3 kvm_open.3 kvm_openfiles.3 +MLINKS+=kvm_read.3 kvm_read2.3 kvm_read.3 kvm_write.3 .include <bsd.lib.mk> Index: lib/libkvm/Makefile.depend =================================================================== --- lib/libkvm/Makefile.depend +++ lib/libkvm/Makefile.depend @@ -9,6 +9,7 @@ lib/${CSU_DIR} \ lib/libc \ lib/libcompiler_rt \ + lib/libelf \ .include <dirdeps.mk> Index: lib/libkvm/kvm.h =================================================================== --- lib/libkvm/kvm.h +++ lib/libkvm/kvm.h @@ -51,6 +51,14 @@ #define _SSIZE_T_DECLARED #endif +typedef uint64_t kvaddr_t; /* An address in a target image.
*/ + +struct kvm_nlist { + const char *n_name; + unsigned char n_type; + kvaddr_t n_value; +}; + typedef struct __kvm kvm_t; struct kinfo_proc; @@ -83,13 +91,19 @@ struct kinfo_proc * kvm_getprocs(kvm_t *, int, int, int *); int kvm_getswapinfo(kvm_t *, struct kvm_swap *, int, int); +int kvm_native(kvm_t *); int kvm_nlist(kvm_t *, struct nlist *); +int kvm_nlist2(kvm_t *, struct kvm_nlist *); kvm_t *kvm_open (const char *, const char *, const char *, int, const char *); kvm_t *kvm_openfiles (const char *, const char *, const char *, int, char *); +kvm_t *kvm_open2 + (const char *, const char *, int, char *, + int (*)(const char *, kvaddr_t *)); ssize_t kvm_read(kvm_t *, unsigned long, void *, size_t); ssize_t kvm_read_zpcpu(kvm_t *, unsigned long, void *, size_t, int); +ssize_t kvm_read2(kvm_t *, kvaddr_t, void *, size_t); ssize_t kvm_write(kvm_t *, unsigned long, const void *, size_t); __END_DECLS Index: lib/libkvm/kvm.3 =================================================================== --- lib/libkvm/kvm.3 +++ lib/libkvm/kvm.3 @@ -101,6 +101,44 @@ to return (not print out) the error message corresponding to the most recent error condition on the given descriptor. +.Sh CROSS DEBUGGING +The +.Nm +library supports inspection of crash dumps from non-native kernels. +Only a limited subset of the kvm interface is supported for these dumps. +To inspect a crash dump of a non-native kernel, +the caller must provide a +.Fa resolver +function when opening a descriptor via +.Fn kvm_open2 . +In addition, +the kvm interface defines an integer type +.Pq Vt kvaddr_t +that is large enough to hold all valid addresses of all supported +architectures. +The interface also defines a new namelist structure type +.Pq Vt "struct kvm_nlist" +for use with +.Fn kvm_nlist2 . +To avoid address truncation issues, +the caller should use +.Fn kvm_nlist2 +and +.Fn kvm_read2 +in place of +.Fn kvm_nlist +and +.Fn kvm_read , +respectively. +Finally, only the following operations are supported for non-native +crash dumps: +.Fn kvm_close , +.Fn kvm_geterr , +.Fn kvm_open2 , +.Fn kvm_native , +.Fn kvm_nlist2 , +and +.Fn kvm_read2 .
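To make the documented flow concrete, here is a minimal sketch of a cross-debugging client (not part of this change). It uses only the operations listed above; the symbol name "ticks", its hard-coded address, and the single-entry resolver are illustrative assumptions, since a real client such as kgdb resolves names against its own symbol table:

#include <sys/types.h>
#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative resolver with one hard-coded entry; the address is
 * hypothetical.  A real client consults its own symbol table here.
 */
static int
resolve_symbol(const char *name, kvaddr_t *addr)
{

	if (strcmp(name, "ticks") == 0) {
		*addr = 0xffffffff80e4c3d0;	/* hypothetical address */
		return (0);
	}
	return (1);	/* non-zero reports an unresolved symbol */
}

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kvm_nlist nl[2] = {
		{ .n_name = "ticks" },
		{ .n_name = NULL },
	};
	kvm_t *kd;
	int32_t ticks;

	if (argc != 3)
		errx(1, "usage: %s kernel vmcore", argv[0]);
	kd = kvm_open2(argv[1], argv[2], O_RDONLY, errbuf, resolve_symbol);
	if (kd == NULL)
		errx(1, "kvm_open2: %s", errbuf);
	if (!kvm_native(kd))
		printf("non-native vmcore; only the *2 interfaces apply\n");
	if (kvm_nlist2(kd, nl) != 0)
		errx(1, "kvm_nlist2: unresolved symbol");
	if (kvm_read2(kd, nl[0].n_value, &ticks, sizeof(ticks)) !=
	    sizeof(ticks))
		errx(1, "kvm_read2: %s", kvm_geterr(kd));
	/* For a non-native core the caller must byte-swap as needed. */
	printf("ticks: %d\n", ticks);
	kvm_close(kd);
	return (0);
}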
.Sh SEE ALSO .Xr kvm_close 3 , .Xr kvm_getargv 3 , @@ -110,10 +148,14 @@ .Xr kvm_getloadavg 3 , .Xr kvm_getprocs 3 , .Xr kvm_getswapinfo 3 , +.Xr kvm_native 3 , .Xr kvm_nlist 3 , +.Xr kvm_nlist2 3 , .Xr kvm_open 3 , +.Xr kvm_open2 3 , .Xr kvm_openfiles 3 , .Xr kvm_read 3 , +.Xr kvm_read2 3 , .Xr kvm_write 3 , .Xr sysctl 3 , .Xr kmem 4 , Index: lib/libkvm/kvm.c =================================================================== --- lib/libkvm/kvm.c +++ lib/libkvm/kvm.c @@ -41,62 +41,63 @@ #endif /* LIBC_SCCS and not lint */ #include +#include #define _WANT_VNET #include -#include -#include -#include -#include #include #include +#include #include -#include -#include - -#include - -#include #include #include #include -#include #include +#include #include #include #include -#include #include #include "kvm_private.h" -#ifndef CROSS_LIBKVM +SET_DECLARE(kvm_arch, struct kvm_arch); /* from src/lib/libc/gen/nlist.c */ int __fdnlist(int, struct nlist *); -#define kvm_fdnlist __fdnlist - -#else - -#include - static int -kvm_fdnlist(int fd, struct nlist *list) +kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list) { - psaddr_t addr; - ps_err_e pserr; - int nfail; + kvaddr_t addr; + int error, nfail; + + if (kd->resolve_symbol == NULL) { + struct nlist *nl; + int count, i; + + for (count = 0; list[count].n_name != NULL && + list[count].n_name[0] != '\0'; count++) + ; + nl = calloc(count + 1, sizeof(*nl)); + for (i = 0; i < count; i++) + nl[i].n_name = list[i].n_name; + nfail = __fdnlist(kd->nlfd, nl); + for (i = 0; i < count; i++) { + list[i].n_type = nl[i].n_type; + list[i].n_value = nl[i].n_value; + } + free(nl); + return (nfail); + } nfail = 0; while (list->n_name != NULL && list->n_name[0] != '\0') { - list->n_other = 0; - list->n_desc = 0; - pserr = ps_pglobal_lookup(NULL, NULL, list->n_name, &addr); - if (pserr != PS_OK) { + error = kd->resolve_symbol(list->n_name, &addr); + if (error != 0) { nfail++; list->n_value = 0; list->n_type = 0; @@ -109,8 +110,6 @@ return (nfail); } -#endif /* CROSS_LIBKVM */ - char * kvm_geterr(kvm_t *kd) { @@ -175,9 +174,206 @@ return (p); } +static int +_kvm_read_kernel_ehdr(kvm_t *kd) +{ + Elf *elf; + + if (elf_version(EV_CURRENT) == EV_NONE) { + _kvm_err(kd, kd->program, "Unsupported libelf"); + return (-1); + } + elf = elf_begin(kd->nlfd, ELF_C_READ, NULL); + if (elf == NULL) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + return (-1); + } + if (elf_kind(elf) != ELF_K_ELF) { + _kvm_err(kd, kd->program, "kernel is not an ELF file"); + elf_end(elf); + return (-1); + } + if (gelf_getehdr(elf, &kd->nlehdr) == NULL) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + elf_end(elf); + return (-1); + } + elf_end(elf); + + switch (kd->nlehdr.e_ident[EI_DATA]) { + case ELFDATA2LSB: + case ELFDATA2MSB: + return (0); + default: + _kvm_err(kd, kd->program, + "unsupported ELF data encoding for kernel"); + return (-1); + } +} + +int +_kvm_probe_elf_kernel(kvm_t *kd, int class, int machine) +{ + + return (kd->nlehdr.e_ident[EI_CLASS] == class && + kd->nlehdr.e_type == ET_EXEC && + kd->nlehdr.e_machine == machine); +} + +int +_kvm_is_minidump(kvm_t *kd) +{ + char minihdr[8]; + + if (kd->rawdump) + return (0); + if (pread(kd->pmfd, &minihdr, 8, 0) == 8 && + memcmp(&minihdr, "minidump", 8) == 0) + return (1); + return (0); +} + +/* + * The powerpc backend has a hack to strip a leading kerneldump + * header from the core before treating it as an ELF header. + * + * We can add that here if we can get a change to libelf to support + * an initial offset into the file.
Alternatively we could patch + * savecore to extract cores from a regular file instead. + */ +int +_kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp) +{ + GElf_Ehdr ehdr; + GElf_Phdr *phdr; + Elf *elf; + size_t i, phnum; + + elf = elf_begin(kd->pmfd, ELF_C_READ, NULL); + if (elf == NULL) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + return (-1); + } + if (elf_kind(elf) != ELF_K_ELF) { + _kvm_err(kd, kd->program, "invalid core"); + goto bad; + } + if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) { + _kvm_err(kd, kd->program, "invalid core"); + goto bad; + } + if (gelf_getehdr(elf, &ehdr) == NULL) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + goto bad; + } + if (ehdr.e_type != ET_CORE) { + _kvm_err(kd, kd->program, "invalid core"); + goto bad; + } + if (ehdr.e_machine != kd->nlehdr.e_machine) { + _kvm_err(kd, kd->program, "invalid core"); + goto bad; + } + + if (elf_getphdrnum(elf, &phnum) == -1) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + goto bad; + } + + phdr = calloc(phnum, sizeof(*phdr)); + if (phdr == NULL) { + _kvm_err(kd, kd->program, "failed to allocate phdrs"); + goto bad; + } + + for (i = 0; i < phnum; i++) { + if (gelf_getphdr(elf, i, &phdr[i]) == NULL) { + _kvm_err(kd, kd->program, "%s", elf_errmsg(0)); + goto bad; + } + } + elf_end(elf); + *phnump = phnum; + *phdrp = phdr; + return (0); + +bad: + elf_end(elf); + return (-1); +} + +static void +_kvm_hpt_insert(struct hpt *hpt, uint64_t pa, off_t off) +{ + struct hpte *hpte; + uint32_t fnv = FNV1_32_INIT; + + fnv = fnv_32_buf(&pa, sizeof(pa), fnv); + fnv &= (HPT_SIZE - 1); + hpte = malloc(sizeof(*hpte)); + hpte->pa = pa; + hpte->off = off; + hpte->next = hpt->hpt_head[fnv]; + hpt->hpt_head[fnv] = hpte; +} + +void +_kvm_hpt_init(kvm_t *kd, struct hpt *hpt, void *base, size_t len, off_t off, + int page_size, int word_size) +{ + uint64_t bits, idx, pa; + uint64_t *base64; + uint32_t *base32; + + base64 = base; + base32 = base; + for (idx = 0; idx < len / word_size; idx++) { + if (word_size == sizeof(uint64_t)) + bits = _kvm64toh(kd, base64[idx]); + else + bits = _kvm32toh(kd, base32[idx]); + pa = idx * word_size * NBBY * page_size; + for (; bits != 0; bits >>= 1, pa += page_size) { + if ((bits & 1) == 0) + continue; + _kvm_hpt_insert(hpt, pa, off); + off += page_size; + } + } +} + +off_t +_kvm_hpt_find(struct hpt *hpt, uint64_t pa) +{ + struct hpte *hpte; + uint32_t fnv = FNV1_32_INIT; + + fnv = fnv_32_buf(&pa, sizeof(pa), fnv); + fnv &= (HPT_SIZE - 1); + for (hpte = hpt->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) { + if (pa == hpte->pa) + return (hpte->off); + } + return (-1); +} + +void +_kvm_hpt_free(struct hpt *hpt) +{ + struct hpte *hpte, *next; + int i; + + for (i = 0; i < HPT_SIZE; i++) { + for (hpte = hpt->hpt_head[i]; hpte != NULL; hpte = next) { + next = hpte->next; + free(hpte); + } + } +} + static kvm_t * _kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout) { + struct kvm_arch **parch; struct stat st; kd->vmfd = -1; @@ -235,16 +431,40 @@ } /* * This is a crash dump. - * Initialize the virtual address translation machinery, - * but first setup the namelist fd. + * Open the namelist fd and determine the architecture. 
*/ if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) { _kvm_syserr(kd, kd->program, "%s", uf); goto failed; } + if (_kvm_read_kernel_ehdr(kd) < 0) + goto failed; if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0) kd->rawdump = 1; - if (_kvm_initvtop(kd) < 0) + SET_FOREACH(parch, kvm_arch) { + if ((*parch)->ka_probe(kd)) { + kd->arch = *parch; + break; + } + } + if (kd->arch == NULL) { + _kvm_err(kd, kd->program, "unsupported architecture"); + goto failed; + } + + /* + * Non-native kernels require a symbol resolver. + */ + if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) { + _kvm_err(kd, kd->program, + "non-native kernel requires a symbol resolver"); + goto failed; + } + + /* + * Initialize the virtual address translation machinery. + */ + if (kd->arch->ka_initvtop(kd) < 0) goto failed; return (kd); failed: @@ -267,7 +487,6 @@ (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX); return (0); } - kd->program = 0; return (_kvm_open(kd, uf, mf, flag, errout)); } @@ -287,19 +506,33 @@ return (_kvm_open(kd, uf, mf, flag, NULL)); } +kvm_t * +kvm_open2(const char *uf, const char *mf, int flag, char *errout, + int (*resolver)(const char *, kvaddr_t *)) +{ + kvm_t *kd; + + if ((kd = calloc(1, sizeof(*kd))) == NULL) { + (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX); + return (0); + } + kd->resolve_symbol = resolver; + return (_kvm_open(kd, uf, mf, flag, errout)); +} + int kvm_close(kvm_t *kd) { int error = 0; + if (kd->vmst != NULL) + kd->arch->ka_freevtop(kd); if (kd->pmfd >= 0) error |= close(kd->pmfd); if (kd->vmfd >= 0) error |= close(kd->vmfd); if (kd->nlfd >= 0) error |= close(kd->nlfd); - if (kd->vmst) - _kvm_freevtop(kd); if (kd->procbase != 0) free((void *)kd->procbase); if (kd->argbuf != 0) @@ -318,10 +551,10 @@ * symbol names, try again, and merge back what we could resolve. */ static int -kvm_fdnlist_prefix(kvm_t *kd, struct nlist *nl, int missing, const char *prefix, - uintptr_t (*validate_fn)(kvm_t *, uintptr_t)) +kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing, + const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t)) { - struct nlist *n, *np, *p; + struct kvm_nlist *n, *np, *p; char *cp, *ce; const char *ccp; size_t len; @@ -337,14 +570,14 @@ for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; - len += sizeof(struct nlist) + strlen(prefix) + + len += sizeof(struct kvm_nlist) + strlen(prefix) + 2 * (strlen(p->n_name) + 1); unresolved++; } if (unresolved == 0) return (unresolved); /* Add space for the terminating nlist entry. */ - len += sizeof(struct nlist); + len += sizeof(struct kvm_nlist); unresolved++; /* Alloc one chunk for (nlist, [names]) and setup pointers. */ @@ -353,7 +586,7 @@ if (n == NULL) return (missing); cp = ce = (char *)np; - cp += unresolved * sizeof(struct nlist); + cp += unresolved * sizeof(struct kvm_nlist); ce += len; /* Generate shortened nlist with special prefix. */ @@ -361,7 +594,7 @@ for (p = nl; p->n_name && p->n_name[0]; ++p) { if (p->n_type != N_UNDF) continue; - bcopy(p, np, sizeof(struct nlist)); + *np = *p; /* Save the new\0orig. name so we can later match it again. */ slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix, (prefix[0] != '\0' && p->n_name[0] == '_') ? @@ -376,7 +609,7 @@ /* Do lookup on the reduced list. */ np = n; - unresolved = kvm_fdnlist(kd->nlfd, np); + unresolved = kvm_fdnlist(kd, np); /* Check if we could resolve further symbols and update the list. 
*/ if (unresolved >= 0 && unresolved < missing) { @@ -398,8 +631,6 @@ continue; /* Update nlist with new, translated results. */ p->n_type = np->n_type; - p->n_other = np->n_other; - p->n_desc = np->n_desc; if (validate_fn) p->n_value = (*validate_fn)(kd, np->n_value); else @@ -418,9 +649,9 @@ } int -_kvm_nlist(kvm_t *kd, struct nlist *nl, int initialize) +_kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize) { - struct nlist *p; + struct kvm_nlist *p; int nvalid; struct kld_sym_lookup lookup; int error; @@ -433,7 +664,7 @@ * slow library call. */ if (!ISALIVE(kd)) { - error = kvm_fdnlist(kd->nlfd, nl); + error = kvm_fdnlist(kd, nl); if (error <= 0) /* Hard error or success. */ return (error); @@ -475,8 +706,6 @@ if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) { p->n_type = N_TEXT; - p->n_other = 0; - p->n_desc = 0; if (_kvm_vnet_initialized(kd, initialize) && strcmp(prefix, VNET_SYMPREFIX) == 0) p->n_value = @@ -519,7 +748,7 @@ } int -kvm_nlist(kvm_t *kd, struct nlist *nl) +kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl) { /* @@ -529,9 +758,49 @@ return (_kvm_nlist(kd, nl, 1)); } +int +kvm_nlist(kvm_t *kd, struct nlist *nl) +{ + struct kvm_nlist *kl; + int count, i, nfail; + + /* + * Avoid reporting truncated addresses by failing for non-native + * cores. + */ + if (!kvm_native(kd)) { + _kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore"); + return (-1); + } + + for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0'; + count++) + ; + if (count == 0) + return (0); + kl = calloc(count + 1, sizeof(*kl)); + for (i = 0; i < count; i++) + kl[i].n_name = nl[i].n_name; + nfail = kvm_nlist2(kd, kl); + for (i = 0; i < count; i++) { + nl[i].n_type = kl[i].n_type; + nl[i].n_other = 0; + nl[i].n_desc = 0; + nl[i].n_value = kl[i].n_value; + } + free(kl); + return (nfail); +} + ssize_t kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len) { + + return (kvm_read2(kd, kva, buf, len)); +} + +ssize_t +kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len) +{ int cc; ssize_t cr; off_t pa; @@ -544,7 +813,8 @@ */ errno = 0; if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) { - _kvm_err(kd, 0, "invalid address (%lx)", kva); + _kvm_err(kd, 0, "invalid address (0x%jx)", + (uintmax_t)kva); return (-1); } cr = read(kd->vmfd, buf, len); @@ -558,7 +828,7 @@ cp = buf; while (len > 0) { - cc = _kvm_kvatop(kd, kva, &pa); + cc = kd->arch->ka_kvatop(kd, kva, &pa); if (cc == 0) return (-1); if (cc > (ssize_t)len) @@ -574,7 +844,7 @@ break; } /* - * If kvm_kvatop returns a bogus value or our core file is + * If ka_kvatop returns a bogus value or our core file is * truncated, we might wind up seeking beyond the end of the * core file in which case the read will return 0 (EOF). */ @@ -616,3 +886,12 @@ } /* NOTREACHED */ } + +int +kvm_native(kvm_t *kd) +{ + + if (ISALIVE(kd)) + return (1); + return (kd->arch->ka_native(kd)); +} Index: lib/libkvm/kvm_aarch64.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_aarch64.h @@ -0,0 +1,61 @@ +/*- + * Copyright (c) 2015 John H. Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __KVM_AARCH64_H__ +#define __KVM_AARCH64_H__ + +#ifdef __aarch64__ +#include +#endif + +typedef uint64_t aarch64_physaddr_t; +typedef uint64_t aarch64_pte_t; + +#define AARCH64_PAGE_SHIFT 12 +#define AARCH64_PAGE_SIZE (1 << AARCH64_PAGE_SHIFT) +#define AARCH64_PAGE_MASK (AARCH64_PAGE_SIZE - 1) + +#define AARCH64_ATTR_MASK 0xfff0000000000fff + +#define AARCH64_ATTR_DESCR_MASK 3 + +#define AARCH64_L3_SHIFT 12 +#define AARCH64_L3_PAGE 0x3 + +#ifdef __aarch64__ +_Static_assert(PAGE_SHIFT == AARCH64_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == AARCH64_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == AARCH64_PAGE_MASK, "PAGE_MASK mismatch"); +_Static_assert(ATTR_MASK == AARCH64_ATTR_MASK, "ATTR_MASK mismatch"); +_Static_assert(ATTR_DESCR_MASK == AARCH64_ATTR_DESCR_MASK, + "ATTR_DESCR_MASK mismatch"); +_Static_assert(L3_SHIFT == AARCH64_L3_SHIFT, "L3_SHIFT mismatch"); +_Static_assert(L3_PAGE == AARCH64_L3_PAGE, "L3_PAGE mismatch"); +#endif + +#endif /* !__KVM_AARCH64_H__ */ Index: lib/libkvm/kvm_amd64.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_amd64.h @@ -0,0 +1,88 @@ +/*- + * Copyright (c) 2015 John H. Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __KVM_AMD64_H__ +#define __KVM_AMD64_H__ + +#ifdef __amd64__ +#include +#include +#endif + +typedef uint64_t amd64_physaddr_t; +typedef uint64_t amd64_pte_t; +typedef uint64_t amd64_pde_t; +typedef uint64_t amd64_pdpe_t; +typedef uint64_t amd64_pml4e_t; + +#define AMD64_NPTEPG (AMD64_PAGE_SIZE / sizeof(amd64_pte_t)) +#define AMD64_PAGE_SHIFT 12 +#define AMD64_PAGE_SIZE (1 << AMD64_PAGE_SHIFT) +#define AMD64_PAGE_MASK (AMD64_PAGE_SIZE - 1) +#define AMD64_NPDEPG (AMD64_PAGE_SIZE / sizeof(amd64_pde_t)) +#define AMD64_PDRSHIFT 21 +#define AMD64_NBPDR (1 << AMD64_PDRSHIFT) +#define AMD64_PDRMASK (AMD64_NBPDR - 1) +#define AMD64_NPDPEPG (AMD64_PAGE_SIZE / sizeof(amd64_pdpe_t)) +#define AMD64_PDPSHIFT 30 +#define AMD64_NBPDP (1 << AMD64_PDPSHIFT) +#define AMD64_PDPMASK (AMD64_NBPDP - 1) +#define AMD64_NPML4EPG (AMD64_PAGE_SIZE / sizeof(amd64_pml4e_t)) +#define AMD64_PML4SHIFT 39 + +#define AMD64_PG_V 0x001 +#define AMD64_PG_PS 0x080 +#define AMD64_PG_FRAME (0x000ffffffffff000) +#define AMD64_PG_PS_FRAME (0x000fffffffe00000) +#define AMD64_PG_1GB_FRAME (0x000fffffc0000000) + +#ifdef __amd64__ +_Static_assert(NPTEPG == AMD64_NPTEPG, "NPTEPG mismatch"); +_Static_assert(PAGE_SHIFT == AMD64_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == AMD64_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == AMD64_PAGE_MASK, "PAGE_MASK mismatch"); +_Static_assert(NPDEPG == AMD64_NPDEPG, "NPDEPG mismatch"); +_Static_assert(PDRSHIFT == AMD64_PDRSHIFT, "PDRSHIFT mismatch"); +_Static_assert(NBPDR == AMD64_NBPDR, "NBPDR mismatch"); +_Static_assert(PDRMASK == AMD64_PDRMASK, "PDRMASK mismatch"); +_Static_assert(NPDPEPG == AMD64_NPDPEPG, "NPDPEPG mismatch"); +_Static_assert(PDPSHIFT == AMD64_PDPSHIFT, "PDPSHIFT mismatch"); +_Static_assert(NBPDP == AMD64_NBPDP, "NBPDP mismatch"); +_Static_assert(PDPMASK == AMD64_PDPMASK, "PDPMASK mismatch"); +_Static_assert(NPML4EPG == AMD64_NPML4EPG, "NPML4EPG mismatch"); +_Static_assert(PML4SHIFT == AMD64_PML4SHIFT, "PML4SHIFT mismatch"); + +_Static_assert(PG_V == AMD64_PG_V, "PG_V mismatch"); +_Static_assert(PG_PS == AMD64_PG_PS, "PG_PS mismatch"); +_Static_assert(PG_FRAME == AMD64_PG_FRAME, "PG_FRAME mismatch"); +_Static_assert(PG_PS_FRAME == AMD64_PG_PS_FRAME, "PG_PS_FRAME mismatch"); +#endif + +int _amd64_native(kvm_t *); + +#endif /* !__KVM_AMD64_H__ */ Index: lib/libkvm/kvm_amd64.c =================================================================== --- lib/libkvm/kvm_amd64.c +++ lib/libkvm/kvm_amd64.c @@ -46,117 +46,76 @@ */ #include -#include -#include -#include -#include +#include +#include #include #include #include -#include #include -#include -#include - -#include - #include #include "kvm_private.h" +#include "kvm_amd64.h" -#ifndef btop -#define btop(x) (amd64_btop(x)) -#define ptob(x) (amd64_ptob(x)) -#endif - -/* minidump must be the first item! 
*/ struct vmstate { - int minidump; /* 1 = minidump mode */ - void *mmapbase; - size_t mmapsize; - pml4_entry_t *PML4; + size_t phnum; + GElf_Phdr *phdr; + amd64_pml4e_t *PML4; }; /* - * Map the ELF headers into the process' address space. We do this in two - * steps: first the ELF header itself and using that information the whole - * set of headers. - */ -static int -_kvm_maphdrs(kvm_t *kd, size_t sz) -{ - struct vmstate *vm = kd->vmst; - - /* munmap() previous mmap(). */ - if (vm->mmapbase != NULL) { - munmap(vm->mmapbase, vm->mmapsize); - vm->mmapbase = NULL; - } - - vm->mmapsize = sz; - vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); - if (vm->mmapbase == MAP_FAILED) { - _kvm_err(kd, kd->program, "cannot mmap corefile"); - return (-1); - } - return (0); -} - -/* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { - Elf_Ehdr *e = kd->vmst->mmapbase; - Elf_Phdr *p; - int n; + struct vmstate *vm = kd->vmst; + GElf_Phdr *p; + size_t n; if (kd->rawdump) { *ofs = pa; - return (PAGE_SIZE - ((size_t)pa & PAGE_MASK)); + return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } - p = (Elf_Phdr*)((char*)e + e->e_phoff); - n = e->e_phnum; + p = vm->phdr; + n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; - return (PAGE_SIZE - ((size_t)pa & PAGE_MASK)); + return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK)); } -void -_kvm_freevtop(kvm_t *kd) +static void +_amd64_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (kd->vmst->minidump) - return (_kvm_minidump_freevtop(kd)); - if (vm->mmapbase != NULL) - munmap(vm->mmapbase, vm->mmapsize); if (vm->PML4) free(vm->PML4); + free(vm->phdr); free(vm); kd->vmst = NULL; } -int -_kvm_initvtop(kvm_t *kd) +static int +_amd64_probe(kvm_t *kd) +{ + + return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && + !_kvm_is_minidump(kd)); +} + +static int +_amd64_initvtop(kvm_t *kd) { - struct nlist nl[2]; - u_long pa; - u_long kernbase; - pml4_entry_t *PML4; - Elf_Ehdr *ehdr; - size_t hdrsz; - char minihdr[8]; - - if (!kd->rawdump && pread(kd->pmfd, &minihdr, 8, 0) == 8) - if (memcmp(&minihdr, "minidump", 8) == 0) - return (_kvm_minidump_initvtop(kd)); + struct kvm_nlist nl[2]; + amd64_physaddr_t pa; + kvaddr_t kernbase; + amd64_pml4e_t *PML4; kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); if (kd->vmst == 0) { @@ -166,19 +125,15 @@ kd->vmst->PML4 = 0; if (kd->rawdump == 0) { - if (_kvm_maphdrs(kd, sizeof(Elf_Ehdr)) == -1) - return (-1); - - ehdr = kd->vmst->mmapbase; - hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum; - if (_kvm_maphdrs(kd, hdrsz) == -1) + if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, + &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; - if (kvm_nlist(kd, nl) != 0) { + if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no kernbase"); return (-1); } @@ -187,17 +142,18 @@ nl[0].n_name = "KPML4phys"; nl[1].n_name = 0; - if (kvm_nlist(kd, nl) != 0) { + if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist - no KPML4phys"); return (-1); } - if (kvm_read(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != + if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); return (-1); } - PML4 = _kvm_malloc(kd, PAGE_SIZE); - if (kvm_read(kd, pa, PML4, PAGE_SIZE) != PAGE_SIZE) { + pa 
= le64toh(pa); + PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE); + if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read KPML4phys"); return (-1); } @@ -206,27 +162,27 @@ } static int -_kvm_vatop(kvm_t *kd, u_long va, off_t *pa) +_amd64_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - u_long offset; - u_long pdpe_pa; - u_long pde_pa; - u_long pte_pa; - pml4_entry_t pml4e; - pdp_entry_t pdpe; - pd_entry_t pde; - pt_entry_t pte; - u_long pml4eindex; - u_long pdpeindex; - u_long pdeindex; - u_long pteindex; - u_long a; + amd64_physaddr_t offset; + amd64_physaddr_t pdpe_pa; + amd64_physaddr_t pde_pa; + amd64_physaddr_t pte_pa; + amd64_pml4e_t pml4e; + amd64_pdpe_t pdpe; + amd64_pde_t pde; + amd64_pte_t pte; + kvaddr_t pml4eindex; + kvaddr_t pdpeindex; + kvaddr_t pdeindex; + kvaddr_t pteindex; + amd64_physaddr_t a; off_t ofs; size_t s; vm = kd->vmst; - offset = va & (PAGE_SIZE - 1); + offset = va & AMD64_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer @@ -236,121 +192,141 @@ s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop: bootstrap data not in dump"); + "_amd64_vatop: bootstrap data not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); } - pml4eindex = (va >> PML4SHIFT) & (NPML4EPG - 1); - pml4e = vm->PML4[pml4eindex]; - if (((u_long)pml4e & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pml4e not valid"); + pml4eindex = (va >> AMD64_PML4SHIFT) & (AMD64_NPML4EPG - 1); + pml4e = le64toh(vm->PML4[pml4eindex]); + if ((pml4e & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, "_amd64_vatop: pml4e not valid"); goto invalid; } - pdpeindex = (va >> PDPSHIFT) & (NPDPEPG-1); - pdpe_pa = ((u_long)pml4e & PG_FRAME) + - (pdpeindex * sizeof(pdp_entry_t)); + pdpeindex = (va >> AMD64_PDPSHIFT) & (AMD64_NPDPEPG - 1); + pdpe_pa = (pml4e & AMD64_PG_FRAME) + (pdpeindex * sizeof(amd64_pdpe_t)); s = _kvm_pa2off(kd, pdpe_pa, &ofs); - if (s < sizeof pdpe) { - _kvm_err(kd, kd->program, "_kvm_vatop: pdpe_pa not found"); + if (s < sizeof(pdpe)) { + _kvm_err(kd, kd->program, "_amd64_vatop: pdpe_pa not found"); goto invalid; } - if (lseek(kd->pmfd, ofs, 0) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: lseek pdpe_pa"); + if (pread(kd->pmfd, &pdpe, sizeof(pdpe), ofs) != sizeof(pdpe)) { + _kvm_syserr(kd, kd->program, "_amd64_vatop: read pdpe"); goto invalid; } - if (read(kd->pmfd, &pdpe, sizeof pdpe) != sizeof pdpe) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: read pdpe"); + pdpe = le64toh(pdpe); + if ((pdpe & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, "_amd64_vatop: pdpe not valid"); goto invalid; } - if (((u_long)pdpe & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pdpe not valid"); - goto invalid; + + if (pdpe & AMD64_PG_PS) { + /* + * No next-level page table; pdpe describes one 1GB page. 
+ */ + a = (pdpe & AMD64_PG_1GB_FRAME) + (va & AMD64_PDPMASK); + s = _kvm_pa2off(kd, a, pa); + if (s == 0) { + _kvm_err(kd, kd->program, + "_amd64_vatop: 1GB page address not in dump"); + goto invalid; + } else + return (AMD64_NBPDP - (va & AMD64_PDPMASK)); } - pdeindex = (va >> PDRSHIFT) & (NPDEPG-1); - pde_pa = ((u_long)pdpe & PG_FRAME) + (pdeindex * sizeof(pd_entry_t)); + pdeindex = (va >> AMD64_PDRSHIFT) & (AMD64_NPDEPG - 1); + pde_pa = (pdpe & AMD64_PG_FRAME) + (pdeindex * sizeof(amd64_pde_t)); s = _kvm_pa2off(kd, pde_pa, &ofs); - if (s < sizeof pde) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: pde_pa not found"); - goto invalid; - } - if (lseek(kd->pmfd, ofs, 0) == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: lseek pde_pa"); + if (s < sizeof(pde)) { + _kvm_syserr(kd, kd->program, "_amd64_vatop: pde_pa not found"); goto invalid; } - if (read(kd->pmfd, &pde, sizeof pde) != sizeof pde) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: read pde"); + if (pread(kd->pmfd, &pde, sizeof(pde), ofs) != sizeof(pde)) { + _kvm_syserr(kd, kd->program, "_amd64_vatop: read pde"); goto invalid; } - if (((u_long)pde & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pde not valid"); + pde = le64toh(pde); + if ((pde & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, "_amd64_vatop: pde not valid"); goto invalid; } - if ((u_long)pde & PG_PS) { - /* - * No final-level page table; ptd describes one 2MB page. - */ -#define PAGE2M_MASK (NBPDR - 1) -#define PG_FRAME2M (~PAGE2M_MASK) - a = ((u_long)pde & PG_FRAME2M) + (va & PAGE2M_MASK); + if (pde & AMD64_PG_PS) { + /* + * No final-level page table; pde describes one 2MB page. + */ + a = (pde & AMD64_PG_PS_FRAME) + (va & AMD64_PDRMASK); s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop: 2MB page address not in dump"); + "_amd64_vatop: 2MB page address not in dump"); goto invalid; } else - return (NBPDR - (va & PAGE2M_MASK)); + return (AMD64_NBPDR - (va & AMD64_PDRMASK)); } - pteindex = (va >> PAGE_SHIFT) & (NPTEPG-1); - pte_pa = ((u_long)pde & PG_FRAME) + (pteindex * sizeof(pt_entry_t)); + pteindex = (va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1); + pte_pa = (pde & AMD64_PG_FRAME) + (pteindex * sizeof(amd64_pte_t)); s = _kvm_pa2off(kd, pte_pa, &ofs); - if (s < sizeof pte) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte_pa not found"); - goto invalid; - } - if (lseek(kd->pmfd, ofs, 0) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: lseek"); + if (s < sizeof(pte)) { + _kvm_err(kd, kd->program, "_amd64_vatop: pte_pa not found"); goto invalid; } - if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: read"); + if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { + _kvm_syserr(kd, kd->program, "_amd64_vatop: read"); goto invalid; } - if (((u_long)pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + if ((pte & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, "_amd64_vatop: pte not valid"); goto invalid; } - a = ((u_long)pte & PG_FRAME) + offset; + a = (pte & AMD64_PG_FRAME) + offset; s = _kvm_pa2off(kd, a, pa); if (s == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: address not in dump"); + _kvm_err(kd, kd->program, "_amd64_vatop: address not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int
+_amd64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { - if (kd->vmst->minidump) - return (_kvm_minidump_kvatop(kd, va, pa)); if (ISALIVE(kd)) { _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); return (0); } - return (_kvm_vatop(kd, va, pa)); + return (_amd64_vatop(kd, va, pa)); +} + +int +_amd64_native(kvm_t *kd) +{ + +#ifdef __amd64__ + return (1); +#else + return (0); +#endif } + +struct kvm_arch kvm_amd64 = { + .ka_probe = _amd64_probe, + .ka_initvtop = _amd64_initvtop, + .ka_freevtop = _amd64_freevtop, + .ka_kvatop = _amd64_kvatop, + .ka_native = _amd64_native, +}; + +KVM_ARCH(kvm_amd64); Index: lib/libkvm/kvm_arm.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_arm.h @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 2015 John H. Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef __KVM_ARM_H__ +#define __KVM_ARM_H__ + +#ifdef __arm__ +#include +#endif + +typedef uint32_t arm_physaddr_t; +typedef uint32_t arm_pd_entry_t; +typedef uint32_t arm_pt_entry_t; + +#define ARM_PAGE_SHIFT 12 +#define ARM_PAGE_SIZE (1 << ARM_PAGE_SHIFT) /* Page size */ +#define ARM_PAGE_MASK (ARM_PAGE_SIZE - 1) + +#define ARM_L1_TABLE_SIZE 0x4000 /* 16K */ + +#define ARM_L1_S_SIZE 0x00100000 /* 1M */ +#define ARM_L1_S_OFFSET (ARM_L1_S_SIZE - 1) +#define ARM_L1_S_FRAME (~ARM_L1_S_OFFSET) +#define ARM_L1_S_SHIFT 20 + +#define ARM_L2_L_SIZE 0x00010000 /* 64K */ +#define ARM_L2_L_OFFSET (ARM_L2_L_SIZE - 1) +#define ARM_L2_L_FRAME (~ARM_L2_L_OFFSET) +#define ARM_L2_L_SHIFT 16 + +#define ARM_L2_S_SIZE 0x00001000 /* 4K */ +#define ARM_L2_S_OFFSET (ARM_L2_S_SIZE - 1) +#define ARM_L2_S_FRAME (~ARM_L2_S_OFFSET) +#define ARM_L2_S_SHIFT 12 + +#define ARM_L1_TYPE_INV 0x00 /* Invalid (fault) */ +#define ARM_L1_TYPE_C 0x01 /* Coarse L2 */ +#define ARM_L1_TYPE_S 0x02 /* Section */ +#define ARM_L1_TYPE_MASK 0x03 /* Mask of type bits */ + +#define ARM_L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ +#define ARM_L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ + +#define ARM_L2_TYPE_INV 0x00 /* Invalid (fault) */ +#define ARM_L2_TYPE_L 0x01 /* Large Page - 64k - not used yet*/ +#define ARM_L2_TYPE_S 0x02 /* Small Page - 4 */ +#define ARM_L2_TYPE_MASK 0x03 + +#define ARM_L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ + +#ifdef __arm__ +_Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch"); +_Static_assert(L1_TABLE_SIZE == ARM_L1_TABLE_SIZE, "L1_TABLE_SIZE mismatch"); +_Static_assert(L1_S_SIZE == ARM_L1_S_SIZE, "L1_S_SIZE mismatch"); +_Static_assert(L1_S_OFFSET == ARM_L1_S_OFFSET, "L1_S_OFFSET mismatch"); +_Static_assert(L1_S_FRAME == ARM_L1_S_FRAME, "L1_S_FRAME mismatch"); +_Static_assert(L1_S_SHIFT == ARM_L1_S_SHIFT, "L1_S_SHIFT mismatch"); +_Static_assert(L2_L_SIZE == ARM_L2_L_SIZE, "L2_L_SIZE mismatch"); +_Static_assert(L2_L_OFFSET == ARM_L2_L_OFFSET, "L2_L_OFFSET mismatch"); +_Static_assert(L2_L_FRAME == ARM_L2_L_FRAME, "L2_L_FRAME mismatch"); +_Static_assert(L2_L_SHIFT == ARM_L2_L_SHIFT, "L2_L_SHIFT mismatch"); +_Static_assert(L2_S_SIZE == ARM_L2_S_SIZE, "L2_S_SIZE mismatch"); +_Static_assert(L2_S_OFFSET == ARM_L2_S_OFFSET, "L2_S_OFFSET mismatch"); +_Static_assert(L2_S_FRAME == ARM_L2_S_FRAME, "L2_S_FRAME mismatch"); +_Static_assert(L2_S_SHIFT == ARM_L2_S_SHIFT, "L2_S_SHIFT mismatch"); +_Static_assert(L1_TYPE_INV == ARM_L1_TYPE_INV, "L1_TYPE_INV mismatch"); +_Static_assert(L1_TYPE_C == ARM_L1_TYPE_C, "L1_TYPE_C mismatch"); +_Static_assert(L1_TYPE_S == ARM_L1_TYPE_S, "L1_TYPE_S mismatch"); +_Static_assert(L1_TYPE_MASK == ARM_L1_TYPE_MASK, "L1_TYPE_MASK mismatch"); +_Static_assert(L1_S_ADDR_MASK == ARM_L1_S_ADDR_MASK, "L1_S_ADDR_MASK mismatch"); +_Static_assert(L1_C_ADDR_MASK == ARM_L1_C_ADDR_MASK, "L1_C_ADDR_MASK mismatch"); +_Static_assert(L2_TYPE_INV == ARM_L2_TYPE_INV, "L2_TYPE_INV mismatch"); +_Static_assert(L2_TYPE_L == ARM_L2_TYPE_L, "L2_TYPE_L mismatch"); +_Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch"); +_Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch"); +_Static_assert(L2_ADDR_BITS == ARM_L2_ADDR_BITS, "L2_ADDR_BITS mismatch"); +#endif + +int _arm_native(kvm_t *); + +#endif /* !__KVM_ARM_H__ */ Index: lib/libkvm/kvm_arm.c 
=================================================================== --- lib/libkvm/kvm_arm.c +++ lib/libkvm/kvm_arm.c @@ -39,67 +39,38 @@ __FBSDID("$FreeBSD$"); #include -#include -#include - -#ifndef CROSS_LIBKVM -#include -#include -#include -#include -#else -#include "../../sys/arm/include/pte.h" -#include "../../sys/arm/include/vmparam.h" -#endif - -#include -#include +#include #include +#include +#include #include -#include #include +#ifdef __arm__ +#include +#endif + #include "kvm_private.h" +#include "kvm_arm.h" -/* minidump must be the first item! */ struct vmstate { - int minidump; /* 1 = minidump mode */ - pd_entry_t *l1pt; - void *mmapbase; - size_t mmapsize; + arm_pd_entry_t *l1pt; + size_t phnum; + GElf_Phdr *phdr; }; -static int -_kvm_maphdrs(kvm_t *kd, size_t sz) -{ - struct vmstate *vm = kd->vmst; - - /* munmap() previous mmap(). */ - if (vm->mmapbase != NULL) { - munmap(vm->mmapbase, vm->mmapsize); - vm->mmapbase = NULL; - } - - vm->mmapsize = sz; - vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); - if (vm->mmapbase == MAP_FAILED) { - _kvm_err(kd, kd->program, "cannot mmap corefile"); - return (-1); - } - - return (0); -} - /* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz) { - Elf32_Ehdr *e = kd->vmst->mmapbase; - Elf32_Phdr *p = (Elf32_Phdr*)((char*)e + e->e_phoff); - int n = e->e_phnum; + struct vmstate *vm = kd->vmst; + GElf_Phdr *p; + size_t n; + p = vm->phdr; + n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) @@ -111,40 +82,38 @@ return (pgsz - ((size_t)pa & (pgsz - 1))); } -void -_kvm_freevtop(kvm_t *kd) +static void +_arm_freevtop(kvm_t *kd) { - if (kd->vmst != 0) { - if (kd->vmst->minidump) - return (_kvm_minidump_freevtop(kd)); - if (kd->vmst->mmapbase != NULL) - munmap(kd->vmst->mmapbase, kd->vmst->mmapsize); - free(kd->vmst); - kd->vmst = NULL; - } + struct vmstate *vm = kd->vmst; + + free(vm->phdr); + free(vm); + kd->vmst = NULL; } -int -_kvm_initvtop(kvm_t *kd) +static int +_arm_probe(kvm_t *kd) +{ + + return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) && + !_kvm_is_minidump(kd)); +} + +static int +_arm_initvtop(kvm_t *kd) { struct vmstate *vm; - struct nlist nl[2]; - u_long kernbase, physaddr, pa; - pd_entry_t *l1pt; - Elf32_Ehdr *ehdr; - Elf32_Phdr *phdr; - size_t hdrsz; - char minihdr[8]; - int found, i; - - if (!kd->rawdump) { - if (pread(kd->pmfd, &minihdr, 8, 0) == 8) { - if (memcmp(&minihdr, "minidump", 8) == 0) - return (_kvm_minidump_initvtop(kd)); - } else { - _kvm_err(kd, kd->program, "cannot read header"); - return (-1); - } + struct kvm_nlist nl[2]; + kvaddr_t kernbase; + arm_physaddr_t physaddr, pa; + arm_pd_entry_t *l1pt; + size_t i; + int found; + + if (kd->rawdump) { + _kvm_err(kd, kd->program, "raw dumps not supported on arm"); + return (-1); } vm = _kvm_malloc(kd, sizeof(*vm)); @@ -154,19 +123,15 @@ } kd->vmst = vm; vm->l1pt = NULL; - if (_kvm_maphdrs(kd, sizeof(Elf32_Ehdr)) == -1) - return (-1); - ehdr = kd->vmst->mmapbase; - hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum; - if (_kvm_maphdrs(kd, hdrsz) == -1) + + if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1) return (-1); - phdr = (Elf32_Phdr *)((uint8_t *)ehdr + ehdr->e_phoff); found = 0; - for (i = 0; i < ehdr->e_phnum; i++) { - if (phdr[i].p_type == PT_DUMP_DELTA) { - kernbase = phdr[i].p_vaddr; - physaddr = phdr[i].p_paddr; + for (i = 0; i < vm->phnum; i++) { + if (vm->phdr[i].p_type == 
PT_DUMP_DELTA) { + kernbase = vm->phdr[i].p_vaddr; + physaddr = vm->phdr[i].p_paddr; found = 1; break; } @@ -175,30 +140,35 @@ nl[1].n_name = NULL; if (!found) { nl[0].n_name = "kernbase"; - if (kvm_nlist(kd, nl) != 0) + if (kvm_nlist2(kd, nl) != 0) { +#ifdef __arm__ kernbase = KERNBASE; - else +#else + _kvm_err(kd, kd->program, "cannot resolve kernbase"); + return (-1); +#endif + } else kernbase = nl[0].n_value; nl[0].n_name = "physaddr"; - if (kvm_nlist(kd, nl) != 0) { + if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "couldn't get phys addr"); return (-1); } physaddr = nl[0].n_value; } nl[0].n_name = "kernel_l1pa"; - if (kvm_nlist(kd, nl) != 0) { + if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist"); return (-1); } - if (kvm_read(kd, (nl[0].n_value - kernbase + physaddr), &pa, + if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read kernel_l1pa"); return (-1); } - l1pt = _kvm_malloc(kd, L1_TABLE_SIZE); - if (kvm_read(kd, pa, l1pt, L1_TABLE_SIZE) != L1_TABLE_SIZE) { + l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE); + if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) { _kvm_err(kd, kd->program, "cannot read l1pt"); free(l1pt); return (-1); @@ -208,62 +178,51 @@ } /* from arm/pmap.c */ -#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) -/* from arm/pmap.h */ -#define L1_TYPE_INV 0x00 /* Invalid (fault) */ -#define L1_TYPE_C 0x01 /* Coarse L2 */ -#define L1_TYPE_S 0x02 /* Section */ -#define L1_TYPE_F 0x03 /* Fine L2 */ -#define L1_TYPE_MASK 0x03 /* mask of type bits */ - -#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) +#define ARM_L1_IDX(va) ((va) >> ARM_L1_S_SHIFT) + +#define l1pte_section_p(pde) (((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S) #define l1pte_valid(pde) ((pde) != 0) #define l2pte_valid(pte) ((pte) != 0) -#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) +#define l2pte_index(v) (((v) & ARM_L2_ADDR_BITS) >> ARM_L2_S_SHIFT) -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm = kd->vmst; - pd_entry_t pd; - pt_entry_t pte; - off_t pte_pa; - - if (kd->vmst->minidump) - return (_kvm_minidump_kvatop(kd, va, pa)); + arm_pd_entry_t pd; + arm_pt_entry_t pte; + arm_physaddr_t pte_pa; + off_t pte_off; if (vm->l1pt == NULL) - return (_kvm_pa2off(kd, va, pa, PAGE_SIZE)); - pd = vm->l1pt[L1_IDX(va)]; + return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE)); + pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]); if (!l1pte_valid(pd)) goto invalid; if (l1pte_section_p(pd)) { /* 1MB section mapping. 
*/ - *pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET); - return (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE)); + *pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET); + return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE)); } - pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte); - _kvm_pa2off(kd, pte_pa, &pte_pa, L1_S_SIZE); - if (lseek(kd->pmfd, pte_pa, 0) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek"); - goto invalid; - } - if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof (pte)) { - _kvm_syserr(kd, kd->program, "_kvm_kvatop: read"); + pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte); + _kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE); + if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) { + _kvm_syserr(kd, kd->program, "_arm_kvatop: pread"); goto invalid; } + pte = _kvm32toh(kd, pte); if (!l2pte_valid(pte)) { goto invalid; } - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) { - *pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE)); + if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { + *pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET); + return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE)); } - *pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE)); + *pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET); + return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE)); invalid: - _kvm_err(kd, 0, "Invalid address (%lx)", va); + _kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va); return 0; } @@ -284,3 +243,28 @@ return (0); } #endif + +int +_arm_native(kvm_t *kd) +{ + +#ifdef __arm__ +#if _BYTE_ORDER == _LITTLE_ENDIAN + return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); +#else + return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); +#endif +#else + return (0); +#endif +} + +struct kvm_arch kvm_arm = { + .ka_probe = _arm_probe, + .ka_initvtop = _arm_initvtop, + .ka_freevtop = _arm_freevtop, + .ka_kvatop = _arm_kvatop, + .ka_native = _arm_native, +}; + +KVM_ARCH(kvm_arm); Index: lib/libkvm/kvm_cptime.c =================================================================== --- lib/libkvm/kvm_cptime.c +++ lib/libkvm/kvm_cptime.c @@ -96,6 +96,12 @@ return (getsysctl(kd, "kern.cp_time", cp_time, sizeof(long) * CPUSTATES)); + if (!kd->arch->ka_native(kd)) { + _kvm_err(kd, kd->program, + "cannot read cp_time from non-native core"); + return (-1); + } + if (kvm_cp_time_cached == 0) { if (_kvm_cp_time_init(kd) < 0) return (-1); Index: lib/libkvm/kvm_getloadavg.c =================================================================== --- lib/libkvm/kvm_getloadavg.c +++ lib/libkvm/kvm_getloadavg.c @@ -71,6 +71,12 @@ if (ISALIVE(kd)) return (getloadavg(loadavg, nelem)); + if (!kd->arch->ka_native(kd)) { + _kvm_err(kd, kd->program, + "cannot read loadavg from non-native core"); + return (-1); + } + if (kvm_nlist(kd, nl) != 0) { for (p = nl; p->n_type != 0; ++p); _kvm_err(kd, kd->program, Index: lib/libkvm/kvm_getswapinfo.c =================================================================== --- lib/libkvm/kvm_getswapinfo.c +++ lib/libkvm/kvm_getswapinfo.c @@ -117,6 +117,12 @@ struct swdevt *sp, swinfo; struct kvm_swap tot; + if (!kd->arch->ka_native(kd)) { + _kvm_err(kd, kd->program, + "cannot read swapinfo from non-native core"); + return (-1); + } + if (!nlist_init(kd)) return (-1); Index: lib/libkvm/kvm_i386.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_i386.h @@ -0,0 +1,79 @@ +/*- + * Copyright (c) 2015 John H. 
Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __KVM_I386_H__ +#define __KVM_I386_H__ + +#ifdef __i386__ +#include +#include +#endif + +typedef uint32_t i386_physaddr_t; +typedef uint32_t i386_pte_t; +typedef uint32_t i386_pde_t; +typedef uint64_t i386_physaddr_pae_t; +typedef uint64_t i386_pte_pae_t; +typedef uint64_t i386_pde_pae_t; + +#define I386_PAGE_SHIFT 12 +#define I386_PAGE_SIZE (1 << I386_PAGE_SHIFT) +#define I386_PAGE_MASK (I386_PAGE_SIZE - 1) +#define I386_NPTEPG (I386_PAGE_SIZE / sizeof(i386_pte_t)) +#define I386_PDRSHIFT 22 +#define I386_NBPDR (1 << I386_PDRSHIFT) +#define I386_PAGE_PS_MASK (I386_NBPDR - 1) +#define I386_NPTEPG_PAE (I386_PAGE_SIZE / sizeof(i386_pte_pae_t)) +#define I386_PDRSHIFT_PAE 21 +#define I386_NBPDR_PAE (1 << I386_PDRSHIFT_PAE) +#define I386_PAGE_PS_MASK_PAE (I386_NBPDR_PAE - 1) + +#define I386_PG_V 0x001 +#define I386_PG_PS 0x080 +#define I386_PG_FRAME_PAE (0x000ffffffffff000ull) +#define I386_PG_PS_FRAME_PAE (0x000fffffffe00000ull) +#define I386_PG_FRAME (0xfffff000) +#define I386_PG_PS_FRAME (0xffc00000) + +#ifdef __i386__ +_Static_assert(PAGE_SHIFT == I386_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == I386_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == I386_PAGE_MASK, "PAGE_MASK mismatch"); +_Static_assert(NPTEPG == I386_NPTEPG, "NPTEPG mismatch"); +_Static_assert(PDRSHIFT == I386_PDRSHIFT, "PDRSHIFT mismatch"); +_Static_assert(NBPDR == I386_NBPDR, "NBPDR mismatch"); + +_Static_assert(PG_V == I386_PG_V, "PG_V mismatch"); +_Static_assert(PG_PS == I386_PG_PS, "PG_PS mismatch"); +_Static_assert(PG_FRAME == I386_PG_FRAME, "PG_FRAME mismatch"); +_Static_assert(PG_PS_FRAME == I386_PG_PS_FRAME, "PG_PS_FRAME mismatch"); +#endif + +int _i386_native(kvm_t *); + +#endif /* !__KVM_I386_H__ */ Index: lib/libkvm/kvm_i386.c =================================================================== --- lib/libkvm/kvm_i386.c +++ lib/libkvm/kvm_i386.c @@ -46,171 +46,133 @@ */ #include -#include -#include -#include -#include +#include +#include #include #include #include -#include #include -#include -#include - -#include +#ifdef __i386__ +#include /* For KERNBASE. 
*/ +#endif #include #include "kvm_private.h" +#include "kvm_i386.h" -#ifndef btop -#define btop(x) (i386_btop(x)) -#define ptob(x) (i386_ptob(x)) -#endif - -#define PG_FRAME_PAE (~((uint64_t)PAGE_MASK)) -#define PDRSHIFT_PAE 21 -#define NPTEPG_PAE (PAGE_SIZE/sizeof(uint64_t)) -#define NBPDR_PAE (1<vmst; - - /* munmap() previous mmap(). */ - if (vm->mmapbase != NULL) { - munmap(vm->mmapbase, vm->mmapsize); - vm->mmapbase = NULL; - } - - vm->mmapsize = sz; - vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); - if (vm->mmapbase == MAP_FAILED) { - _kvm_err(kd, kd->program, "cannot mmap corefile"); - return (-1); - } - return (0); -} - -/* * Translate a physical memory address to a file-offset in the crash-dump. */ static size_t _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs) { - Elf_Ehdr *e = kd->vmst->mmapbase; - Elf_Phdr *p; - int n; + struct vmstate *vm = kd->vmst; + GElf_Phdr *p; + size_t n; if (kd->rawdump) { *ofs = pa; - return (PAGE_SIZE - ((size_t)pa & PAGE_MASK)); + return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } - p = (Elf_Phdr*)((char*)e + e->e_phoff); - n = e->e_phnum; + p = vm->phdr; + n = vm->phnum; while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz)) p++, n--; if (n == 0) return (0); *ofs = (pa - p->p_paddr) + p->p_offset; - return (PAGE_SIZE - ((size_t)pa & PAGE_MASK)); + return (I386_PAGE_SIZE - (pa & I386_PAGE_MASK)); } -void -_kvm_freevtop(kvm_t *kd) +static void +_i386_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (kd->vmst->minidump) - return (_kvm_minidump_freevtop(kd)); - if (vm->mmapbase != NULL) - munmap(vm->mmapbase, vm->mmapsize); if (vm->PTD) free(vm->PTD); + free(vm->phdr); free(vm); kd->vmst = NULL; } -int -_kvm_initvtop(kvm_t *kd) +static int +_i386_probe(kvm_t *kd) { - struct nlist nl[2]; - u_long pa; - u_long kernbase; + + return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && + !_kvm_is_minidump(kd)); +} + +static int +_i386_initvtop(kvm_t *kd) +{ + struct kvm_nlist nl[2]; + i386_physaddr_t pa; + kvaddr_t kernbase; char *PTD; - Elf_Ehdr *ehdr; - size_t hdrsz; int i; - char minihdr[8]; - - if (!kd->rawdump && pread(kd->pmfd, &minihdr, 8, 0) == 8) - if (memcmp(&minihdr, "minidump", 8) == 0) - return (_kvm_minidump_initvtop(kd)); - kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); - if (kd->vmst == 0) { + kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(struct vmstate)); + if (kd->vmst == NULL) { _kvm_err(kd, kd->program, "cannot allocate vm"); return (-1); } kd->vmst->PTD = 0; if (kd->rawdump == 0) { - if (_kvm_maphdrs(kd, sizeof(Elf_Ehdr)) == -1) - return (-1); - - ehdr = kd->vmst->mmapbase; - hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum; - if (_kvm_maphdrs(kd, hdrsz) == -1) + if (_kvm_read_core_phdrs(kd, &kd->vmst->phnum, + &kd->vmst->phdr) == -1) return (-1); } nl[0].n_name = "kernbase"; nl[1].n_name = 0; - if (kvm_nlist(kd, nl) != 0) + if (kvm_nlist2(kd, nl) != 0) { +#ifdef __i386__ kernbase = KERNBASE; /* for old kernels */ - else +#else + _kvm_err(kd, kd->program, "cannot resolve kernbase"); + return (-1); +#endif + } else kernbase = nl[0].n_value; nl[0].n_name = "IdlePDPT"; nl[1].n_name = 0; - if (kvm_nlist(kd, nl) == 0) { - uint64_t pa64; + if (kvm_nlist2(kd, nl) == 0) { + i386_physaddr_pae_t pa64; - if (kvm_read(kd, (nl[0].n_value - kernbase), &pa, + if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePDPT"); return (-1); } - PTD = _kvm_malloc(kd, 4 * PAGE_SIZE); + pa = le32toh(pa); + PTD = _kvm_malloc(kd, 4 
* I386_PAGE_SIZE); for (i = 0; i < 4; i++) { - if (kvm_read(kd, pa + (i * sizeof(pa64)), &pa64, + if (kvm_read2(kd, pa + (i * sizeof(pa64)), &pa64, sizeof(pa64)) != sizeof(pa64)) { _kvm_err(kd, kd->program, "Cannot read PDPT"); free(PTD); return (-1); } - if (kvm_read(kd, pa64 & PG_FRAME_PAE, - PTD + (i * PAGE_SIZE), PAGE_SIZE) != (PAGE_SIZE)) { + pa64 = le64toh(pa64); + if (kvm_read2(kd, pa64 & I386_PG_FRAME_PAE, + PTD + (i * I386_PAGE_SIZE), I386_PAGE_SIZE) != + I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PDPT"); free(PTD); return (-1); @@ -222,17 +184,18 @@ nl[0].n_name = "IdlePTD"; nl[1].n_name = 0; - if (kvm_nlist(kd, nl) != 0) { + if (kvm_nlist2(kd, nl) != 0) { _kvm_err(kd, kd->program, "bad namelist"); return (-1); } - if (kvm_read(kd, (nl[0].n_value - kernbase), &pa, + if (kvm_read2(kd, (nl[0].n_value - kernbase), &pa, sizeof(pa)) != sizeof(pa)) { _kvm_err(kd, kd->program, "cannot read IdlePTD"); return (-1); } - PTD = _kvm_malloc(kd, PAGE_SIZE); - if (kvm_read(kd, pa, PTD, PAGE_SIZE) != PAGE_SIZE) { + pa = le32toh(pa); + PTD = _kvm_malloc(kd, I386_PAGE_SIZE); + if (kvm_read2(kd, pa, PTD, I386_PAGE_SIZE) != I386_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read PTD"); return (-1); } @@ -243,24 +206,23 @@ } static int -_kvm_vatop(kvm_t *kd, u_long va, off_t *pa) +_i386_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - u_long offset; - u_long pte_pa; - u_long pde_pa; - pd_entry_t pde; - pt_entry_t pte; - u_long pdeindex; - u_long pteindex; + i386_physaddr_t offset; + i386_physaddr_t pte_pa; + i386_pde_t pde; + i386_pte_t pte; + kvaddr_t pdeindex; + kvaddr_t pteindex; size_t s; - u_long a; + i386_physaddr_t a; off_t ofs; - uint32_t *PTD; + i386_pde_t *PTD; vm = kd->vmst; - PTD = (uint32_t *)vm->PTD; - offset = va & (PAGE_SIZE - 1); + PTD = (i386_pde_t *)vm->PTD; + offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer @@ -270,93 +232,87 @@ s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop: bootstrap data not in dump"); + "_i386_vatop: bootstrap data not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); } - pdeindex = va >> PDRSHIFT; - pde = PTD[pdeindex]; - if (((u_long)pde & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pde not valid"); + pdeindex = va >> I386_PDRSHIFT; + pde = le32toh(PTD[pdeindex]); + if ((pde & I386_PG_V) == 0) { + _kvm_err(kd, kd->program, "_i386_vatop: pde not valid"); goto invalid; } - if ((u_long)pde & PG_PS) { - /* - * No second-level page table; ptd describes one 4MB page. - * (We assume that the kernel wouldn't set PG_PS without enabling - * it cr0). - */ -#define PAGE4M_MASK (NBPDR - 1) -#define PG_FRAME4M (~PAGE4M_MASK) - pde_pa = ((u_long)pde & PG_FRAME4M) + (va & PAGE4M_MASK); - s = _kvm_pa2off(kd, pde_pa, &ofs); + if (pde & I386_PG_PS) { + /* + * No second-level page table; ptd describes one 4MB + * page. (We assume that the kernel wouldn't set + * PG_PS without enabling it cr0). 
+ */ + offset = va & I386_PAGE_PS_MASK; + a = (pde & I386_PG_PS_FRAME) + offset; + s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop: 4MB page address not in dump"); + "_i386_vatop: 4MB page address not in dump"); goto invalid; } - *pa = ofs; - return (NBPDR - (va & PAGE4M_MASK)); + return (I386_NBPDR - offset); } - pteindex = (va >> PAGE_SHIFT) & (NPTEPG-1); - pte_pa = ((u_long)pde & PG_FRAME) + (pteindex * sizeof(pde)); + pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG - 1); + pte_pa = (pde & I386_PG_FRAME) + (pteindex * sizeof(pte)); s = _kvm_pa2off(kd, pte_pa, &ofs); - if (s < sizeof pte) { - _kvm_err(kd, kd->program, "_kvm_vatop: pdpe_pa not found"); + if (s < sizeof(pte)) { + _kvm_err(kd, kd->program, "_i386_vatop: pte_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ - if (lseek(kd->pmfd, ofs, 0) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: lseek"); - goto invalid; - } - if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) { - _kvm_syserr(kd, kd->program, "_kvm_vatop: read"); + if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { + _kvm_syserr(kd, kd->program, "_i386_vatop: pread"); goto invalid; } - if (((u_long)pte & PG_V) == 0) { + pte = le32toh(pte); + if ((pte & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop: pte not valid"); goto invalid; } - a = ((u_long)pte & PG_FRAME) + offset; - s =_kvm_pa2off(kd, a, pa); + a = (pte & I386_PG_FRAME) + offset; + s = _kvm_pa2off(kd, a, pa); if (s == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: address not in dump"); + _kvm_err(kd, kd->program, "_i386_vatop: address not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int -_kvm_vatop_pae(kvm_t *kd, u_long va, off_t *pa) +_i386_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - uint64_t offset; - uint64_t pte_pa; - uint64_t pde_pa; - uint64_t pde; - uint64_t pte; - u_long pdeindex; - u_long pteindex; + i386_physaddr_pae_t offset; + i386_physaddr_pae_t pte_pa; + i386_pde_pae_t pde; + i386_pte_pae_t pte; + kvaddr_t pdeindex; + kvaddr_t pteindex; size_t s; - uint64_t a; + i386_physaddr_pae_t a; off_t ofs; - uint64_t *PTD; + i386_pde_pae_t *PTD; vm = kd->vmst; - PTD = (uint64_t *)vm->PTD; - offset = va & (PAGE_SIZE - 1); + PTD = (i386_pde_pae_t *)vm->PTD; + offset = va & I386_PAGE_MASK; /* * If we are initializing (kernel page table descriptor pointer @@ -366,87 +322,101 @@ s = _kvm_pa2off(kd, va, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop_pae: bootstrap data not in dump"); + "_i386_vatop_pae: bootstrap data not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); } - pdeindex = va >> PDRSHIFT_PAE; - pde = PTD[pdeindex]; - if (((u_long)pde & PG_V) == 0) { + pdeindex = va >> I386_PDRSHIFT_PAE; + pde = le64toh(PTD[pdeindex]); + if ((pde & I386_PG_V) == 0) { _kvm_err(kd, kd->program, "_kvm_kvatop_pae: pde not valid"); goto invalid; } - if ((u_long)pde & PG_PS) { - /* - * No second-level page table; ptd describes one 2MB page. - * (We assume that the kernel wouldn't set PG_PS without enabling - * it cr0). 
- */ -#define PAGE2M_MASK (NBPDR_PAE - 1) -#define PG_FRAME2M (~PAGE2M_MASK) - pde_pa = ((u_long)pde & PG_FRAME2M) + (va & PAGE2M_MASK); - s = _kvm_pa2off(kd, pde_pa, &ofs); + if (pde & I386_PG_PS) { + /* + * No second-level page table; ptd describes one 2MB + * page. (We assume that the kernel wouldn't set + * PG_PS without enabling it cr0). + */ + offset = va & I386_PAGE_PS_MASK_PAE; + a = (pde & I386_PG_PS_FRAME_PAE) + offset; + s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop: 2MB page address not in dump"); + "_i386_vatop: 2MB page address not in dump"); goto invalid; } - *pa = ofs; - return (NBPDR_PAE - (va & PAGE2M_MASK)); + return (I386_NBPDR_PAE - offset); } - pteindex = (va >> PAGE_SHIFT) & (NPTEPG_PAE-1); - pte_pa = ((uint64_t)pde & PG_FRAME_PAE) + (pteindex * sizeof(pde)); + pteindex = (va >> I386_PAGE_SHIFT) & (I386_NPTEPG_PAE - 1); + pte_pa = (pde & I386_PG_FRAME_PAE) + (pteindex * sizeof(pde)); s = _kvm_pa2off(kd, pte_pa, &ofs); - if (s < sizeof pte) { - _kvm_err(kd, kd->program, "_kvm_vatop_pae: pdpe_pa not found"); + if (s < sizeof(pte)) { + _kvm_err(kd, kd->program, "_i386_vatop_pae: pdpe_pa not found"); goto invalid; } /* XXX This has to be a physical address read, kvm_read is virtual */ - if (lseek(kd->pmfd, ofs, 0) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_vatop_pae: lseek"); - goto invalid; - } - if (read(kd->pmfd, &pte, sizeof pte) != sizeof pte) { - _kvm_syserr(kd, kd->program, "_kvm_vatop_pae: read"); + if (pread(kd->pmfd, &pte, sizeof(pte), ofs) != sizeof(pte)) { + _kvm_syserr(kd, kd->program, "_i386_vatop_pae: read"); goto invalid; } - if (((uint64_t)pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop_pae: pte not valid"); + pte = le64toh(pte); + if ((pte & I386_PG_V) == 0) { + _kvm_err(kd, kd->program, "_i386_vatop_pae: pte not valid"); goto invalid; } - a = ((uint64_t)pte & PG_FRAME_PAE) + offset; - s =_kvm_pa2off(kd, a, pa); + a = (pte & I386_PG_FRAME_PAE) + offset; + s = _kvm_pa2off(kd, a, pa); if (s == 0) { _kvm_err(kd, kd->program, - "_kvm_vatop_pae: address not in dump"); + "_i386_vatop_pae: address not in dump"); goto invalid; } else - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_i386_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { - if (kd->vmst->minidump) - return (_kvm_minidump_kvatop(kd, va, pa)); if (ISALIVE(kd)) { _kvm_err(kd, 0, "vatop called in live kernel!"); return (0); } if (kd->vmst->pae) - return (_kvm_vatop_pae(kd, va, pa)); + return (_i386_vatop_pae(kd, va, pa)); else - return (_kvm_vatop(kd, va, pa)); + return (_i386_vatop(kd, va, pa)); +} + +int +_i386_native(kvm_t *kd) +{ + +#ifdef __i386__ + return (1); +#else + return (0); +#endif } + +struct kvm_arch kvm_i386 = { + .ka_probe = _i386_probe, + .ka_initvtop = _i386_initvtop, + .ka_freevtop = _i386_freevtop, + .ka_kvatop = _i386_kvatop, + .ka_native = _i386_native, +}; + +KVM_ARCH(kvm_i386); Index: lib/libkvm/kvm_minidump_aarch64.c =================================================================== --- lib/libkvm/kvm_minidump_aarch64.c +++ lib/libkvm/kvm_minidump_aarch64.c @@ -33,112 +33,51 @@ */ #include -#include -#include -#include -#include -#include +#include #include #include #include -#include #include -#include -#include - -#include -#include -#include -#include +#include "../../sys/arm64/include/minidump.h" #include 
-#include #include "kvm_private.h" +#include "kvm_aarch64.h" -struct hpte { - struct hpte *next; - vm_paddr_t pa; - int64_t off; -}; - -#define HPT_SIZE 1024 +#define aarch64_round_page(x) roundup2((kvaddr_t)(x), AARCH64_PAGE_SIZE) -/* minidump must be the first item! */ struct vmstate { - int minidump; /* 1 = minidump mode */ struct minidumphdr hdr; - void *hpt_head[HPT_SIZE]; - uint64_t *bitmap; + struct hpt hpt; uint64_t *page_map; }; -static void -hpt_insert(kvm_t *kd, vm_paddr_t pa, int64_t off) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - hpte = malloc(sizeof(*hpte)); - hpte->pa = pa; - hpte->off = off; - hpte->next = kd->vmst->hpt_head[fnv]; - kd->vmst->hpt_head[fnv] = hpte; -} - -static int64_t -hpt_find(kvm_t *kd, vm_paddr_t pa) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) { - if (pa == hpte->pa) - return (hpte->off); - } - return (-1); -} - static int -inithash(kvm_t *kd, uint64_t *base, int len, off_t off) +_aarch64_minidump_probe(kvm_t *kd) { - uint64_t idx; - uint64_t bit, bits; - vm_paddr_t pa; - for (idx = 0; idx < len / sizeof(*base); idx++) { - bits = base[idx]; - while (bits) { - bit = ffsl(bits) - 1; - bits &= ~(1ul << bit); - pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE; - hpt_insert(kd, pa, off); - off += PAGE_SIZE; - } - } - return (off); + return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_AARCH64) && + _kvm_is_minidump(kd)); } -void -_kvm_minidump_freevtop(kvm_t *kd) +static void +_aarch64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - free(vm->bitmap); + _kvm_hpt_free(&vm->hpt); free(vm->page_map); free(vm); kd->vmst = NULL; } -int -_kvm_minidump_initvtop(kvm_t *kd) +static int +_aarch64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; + uint64_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); @@ -147,7 +86,6 @@ return (-1); } kd->vmst = vmst; - vmst->minidump = 1; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); @@ -159,111 +97,157 @@ return (-1); } - if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { + vmst->hdr.version = le32toh(vmst->hdr.version); + if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } + vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); + vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); + vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); + vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); + vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys); + vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); + vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); /* Skip header and msgbuf */ - off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize); + off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize); - vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); - if (vmst->bitmap == NULL) { + bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); + if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } - if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) != - vmst->hdr.bitmapsize) { + if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != + (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); + free(bitmap); return (-1); } - off += round_page(vmst->hdr.bitmapsize); + off += aarch64_round_page(vmst->hdr.bitmapsize); vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize); if (vmst->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize); + free(bitmap); return (-1); } /* This is the end of the dump, savecore may have truncated it. */ + /* + * XXX: This doesn't make sense. The pmap is not at the end, + * and if it is truncated we don't have any actual data (it's + * all stored after the bitmap and pmap. -- jhb + */ if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) < - PAGE_SIZE) { + AARCH64_PAGE_SIZE) { _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize); + free(bitmap); + return (-1); } off += vmst->hdr.pmapsize; /* build physical address hash table for sparse pages */ - inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off); + _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, + AARCH64_PAGE_SIZE, sizeof(*bitmap)); + free(bitmap); return (0); } static int -_kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa) +_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - u_long offset; - pt_entry_t l3; - u_long l3_index; - u_long a; + aarch64_physaddr_t offset; + aarch64_pte_t l3; + kvaddr_t l3_index; + aarch64_physaddr_t a; off_t ofs; vm = kd->vmst; - offset = va & PAGE_MASK; + offset = va & AARCH64_PAGE_MASK; if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { - a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & ~PAGE_MASK; - ofs = hpt_find(kd, a); + a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) & + ~AARCH64_PAGE_MASK; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: " - "direct map address 0x%lx not in minidump", va); + _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " + "direct map address 0x%jx not in minidump", + (uintmax_t)va); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AARCH64_PAGE_SIZE - offset); } else if (va >= vm->hdr.kernbase) { - l3_index = (va - vm->hdr.kernbase) >> L3_SHIFT; + l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT; if (l3_index >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; - l3 = vm->page_map[l3_index]; - if ((l3 & ATTR_DESCR_MASK) != L3_PAGE) { - _kvm_err(kd, kd->program, "_kvm_vatop: pde not valid"); + l3 = 
le64toh(vm->page_map[l3_index]); + if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) { + _kvm_err(kd, kd->program, + "_aarch64_minidump_vatop: pde not valid"); goto invalid; } - a = l3 & ~ATTR_MASK; - ofs = hpt_find(kd, a); + a = l3 & ~AARCH64_ATTR_MASK; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: " - "physical address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: " + "physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AARCH64_PAGE_SIZE - offset); } else { _kvm_err(kd, kd->program, - "_kvm_vatop: virtual address 0x%lx not minidumped", va); + "_aarch64_minidump_vatop: virtual address 0x%jx not minidumped", + (uintmax_t)va); goto invalid; } invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -int -_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_aarch64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { - _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); + _kvm_err(kd, 0, + "_aarch64_minidump_kvatop called in live kernel!"); return (0); } - return (_kvm_minidump_vatop(kd, va, pa)); + return (_aarch64_minidump_vatop(kd, va, pa)); +} + +static int +_aarch64_native(kvm_t *kd) +{ + +#ifdef __aarch64__ + return (1); +#else + return (0); +#endif } + +struct kvm_arch kvm_aarch64_minidump = { + .ka_probe = _aarch64_minidump_probe, + .ka_initvtop = _aarch64_minidump_initvtop, + .ka_freevtop = _aarch64_minidump_freevtop, + .ka_kvatop = _aarch64_minidump_kvatop, + .ka_native = _aarch64_native, +}; + +KVM_ARCH(kvm_aarch64_minidump); Index: lib/libkvm/kvm_minidump_amd64.c =================================================================== --- lib/libkvm/kvm_minidump_amd64.c +++ lib/libkvm/kvm_minidump_amd64.c @@ -31,112 +31,53 @@ */ #include -#include -#include -#include -#include -#include +#include +#include #include #include #include -#include #include -#include -#include - -#include -#include -#include +#include "../../sys/amd64/include/minidump.h" #include #include "kvm_private.h" +#include "kvm_amd64.h" -struct hpte { - struct hpte *next; - vm_paddr_t pa; - int64_t off; -}; - -#define HPT_SIZE 1024 +#define amd64_round_page(x) roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE) -/* minidump must be the first item! 
*/ struct vmstate { - int minidump; /* 1 = minidump mode */ struct minidumphdr hdr; - void *hpt_head[HPT_SIZE]; - uint64_t *bitmap; - uint64_t *page_map; + struct hpt hpt; + amd64_pte_t *page_map; }; -static void -hpt_insert(kvm_t *kd, vm_paddr_t pa, int64_t off) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - hpte = malloc(sizeof(*hpte)); - hpte->pa = pa; - hpte->off = off; - hpte->next = kd->vmst->hpt_head[fnv]; - kd->vmst->hpt_head[fnv] = hpte; -} - -static int64_t -hpt_find(kvm_t *kd, vm_paddr_t pa) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) { - if (pa == hpte->pa) - return (hpte->off); - } - return (-1); -} - static int -inithash(kvm_t *kd, uint64_t *base, int len, off_t off) +_amd64_minidump_probe(kvm_t *kd) { - uint64_t idx; - uint64_t bit, bits; - vm_paddr_t pa; - - for (idx = 0; idx < len / sizeof(*base); idx++) { - bits = base[idx]; - while (bits) { - bit = bsfq(bits); - bits &= ~(1ul << bit); - pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE; - hpt_insert(kd, pa, off); - off += PAGE_SIZE; - } - } - return (off); + + return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_X86_64) && + _kvm_is_minidump(kd)); } -void -_kvm_minidump_freevtop(kvm_t *kd) +static void +_amd64_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm->bitmap) - free(vm->bitmap); + _kvm_hpt_free(&vm->hpt); if (vm->page_map) free(vm->page_map); free(vm); kd->vmst = NULL; } -int -_kvm_minidump_initvtop(kvm_t *kd) +static int +_amd64_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; + uint64_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); @@ -145,7 +86,6 @@ return (-1); } kd->vmst = vmst; - vmst->minidump = 1; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); @@ -160,177 +100,222 @@ * NB: amd64 minidump header is binary compatible between version 1 * and version 2; this may not be the case for the future versions. */ + vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) { _kvm_err(kd, kd->program, "wrong minidump version. 
expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } + vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); + vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); + vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize); + vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase); + vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase); + vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend); /* Skip header and msgbuf */ - off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize); + off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize); - vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); - if (vmst->bitmap == NULL) { + bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); + if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } - if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) != - vmst->hdr.bitmapsize) { + if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != + (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); + free(bitmap); return (-1); } - off += round_page(vmst->hdr.bitmapsize); + off += amd64_round_page(vmst->hdr.bitmapsize); vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize); if (vmst->page_map == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map", vmst->hdr.pmapsize); + free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) != - vmst->hdr.pmapsize) { + (ssize_t)vmst->hdr.pmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page_map", vmst->hdr.pmapsize); + free(bitmap); return (-1); } off += vmst->hdr.pmapsize; /* build physical address hash table for sparse pages */ - inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off); + _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, + AMD64_PAGE_SIZE, sizeof(*bitmap)); + free(bitmap); return (0); } static int -_kvm_minidump_vatop_v1(kvm_t *kd, u_long va, off_t *pa) +_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - u_long offset; - pt_entry_t pte; - u_long pteindex; - u_long a; + amd64_physaddr_t offset; + amd64_pte_t pte; + kvaddr_t pteindex; + amd64_physaddr_t a; off_t ofs; vm = kd->vmst; - offset = va & (PAGE_SIZE - 1); + offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { - pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; + pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT; if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; - pte = vm->page_map[pteindex]; - if (((u_long)pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + pte = le64toh(vm->page_map[pteindex]); + if ((pte & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop_v1: pte not valid"); goto invalid; } - a = pte & PG_FRAME; - ofs = hpt_find(kd, a); + a = pte & AMD64_PG_FRAME; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { - a = (va - vm->hdr.dmapbase) & ~PAGE_MASK; - ofs = hpt_find(kd, a); + a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: direct map address 0x%lx not in 
minidump", va); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump", + (uintmax_t)va); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); } else { - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop_v1: virtual address 0x%jx not minidumped", + (uintmax_t)va); goto invalid; } invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int -_kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa) +_amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { - pt_entry_t pt[NPTEPG]; + amd64_pte_t pt[AMD64_NPTEPG]; struct vmstate *vm; - u_long offset; - pd_entry_t pde; - pd_entry_t pte; - u_long pteindex; - u_long pdeindex; - u_long a; + amd64_physaddr_t offset; + amd64_pde_t pde; + amd64_pte_t pte; + kvaddr_t pteindex; + kvaddr_t pdeindex; + amd64_physaddr_t a; off_t ofs; vm = kd->vmst; - offset = va & PAGE_MASK; + offset = va & AMD64_PAGE_MASK; if (va >= vm->hdr.kernbase) { - pdeindex = (va - vm->hdr.kernbase) >> PDRSHIFT; + pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT; if (pdeindex >= vm->hdr.pmapsize / sizeof(*vm->page_map)) goto invalid; - pde = vm->page_map[pdeindex]; - if (((u_long)pde & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pde not valid"); + pde = le64toh(vm->page_map[pdeindex]); + if ((pde & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: pde not valid"); goto invalid; } - if ((pde & PG_PS) == 0) { - a = pde & PG_FRAME; - ofs = hpt_find(kd, a); + if ((pde & AMD64_PG_PS) == 0) { + a = pde & AMD64_PG_FRAME; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: pt physical address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: pt physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } - if (pread(kd->pmfd, &pt, PAGE_SIZE, ofs) != PAGE_SIZE) { - _kvm_err(kd, kd->program, "cannot read %d bytes for pt", PAGE_SIZE); + /* TODO: Just read the single PTE */ + if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) != + AMD64_PAGE_SIZE) { + _kvm_err(kd, kd->program, + "cannot read %d bytes for page table", + AMD64_PAGE_SIZE); return (-1); } - pteindex = (va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1); - pte = pt[pteindex]; - if (((u_long)pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + pteindex = (va >> AMD64_PAGE_SHIFT) & + (AMD64_NPTEPG - 1); + pte = le64toh(pt[pteindex]); + if ((pte & AMD64_PG_V) == 0) { + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: pte not valid"); goto invalid; } - a = pte & PG_FRAME; + a = pte & AMD64_PG_FRAME; } else { - a = pde & PG_PS_FRAME; - a += (va & PDRMASK) ^ offset; + a = pde & AMD64_PG_PS_FRAME; + a += (va & AMD64_PDRMASK) ^ offset; } - ofs = hpt_find(kd, a); + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) { - a = (va - vm->hdr.dmapbase) & ~PAGE_MASK; - ofs = hpt_find(kd, a); + a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) 
{ - _kvm_err(kd, kd->program, "_kvm_vatop: direct map address 0x%lx not in minidump", va); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: direct map address 0x%jx not in minidump", + (uintmax_t)va); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (AMD64_PAGE_SIZE - offset); } else { - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va); + _kvm_err(kd, kd->program, + "_amd64_minidump_vatop: virtual address 0x%jx not minidumped", + (uintmax_t)va); goto invalid; } invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -int -_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { - _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); + _kvm_err(kd, 0, + "_amd64_minidump_kvatop called in live kernel!"); return (0); } if (((struct vmstate *)kd->vmst)->hdr.version == 1) - return (_kvm_minidump_vatop_v1(kd, va, pa)); + return (_amd64_minidump_vatop_v1(kd, va, pa)); else - return (_kvm_minidump_vatop(kd, va, pa)); + return (_amd64_minidump_vatop(kd, va, pa)); } + +struct kvm_arch kvm_amd64_minidump = { + .ka_probe = _amd64_minidump_probe, + .ka_initvtop = _amd64_minidump_initvtop, + .ka_freevtop = _amd64_minidump_freevtop, + .ka_kvatop = _amd64_minidump_kvatop, + .ka_native = _amd64_native, +}; + +KVM_ARCH(kvm_amd64_minidump); Index: lib/libkvm/kvm_minidump_arm.c =================================================================== --- lib/libkvm/kvm_minidump_arm.c +++ lib/libkvm/kvm_minidump_arm.c @@ -33,120 +33,54 @@ * ARM machine dependent routines for kvm and minidumps. */ +#include #include -#ifndef CROSS_LIBKVM -#include -#endif -#include -#include -#include -#include +#include +#include +#include #include #include #include -#include -#include - -#ifndef CROSS_LIBKVM -#include -#include -#include -#include -#include -#else -#include "../../sys/arm/include/pte.h" -#include "../../sys/arm/include/vmparam.h" #include "../../sys/arm/include/minidump.h" -#endif - -#include #include "kvm_private.h" +#include "kvm_arm.h" -struct hpte { - struct hpte *next; - uint64_t pa; - int64_t off; -}; +#define arm_round_page(x) roundup2((kvaddr_t)(x), ARM_PAGE_SIZE) -#define HPT_SIZE 1024 - -/* minidump must be the first field */ struct vmstate { - int minidump; /* 1 = minidump mode */ struct minidumphdr hdr; - void *hpt_head[HPT_SIZE]; - uint32_t *bitmap; + struct hpt hpt; void *ptemap; + unsigned char ei_data; }; -static void -hpt_insert(kvm_t *kd, uint64_t pa, int64_t off) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - hpte = malloc(sizeof(*hpte)); - hpte->pa = pa; - hpte->off = off; - hpte->next = kd->vmst->hpt_head[fnv]; - kd->vmst->hpt_head[fnv] = hpte; -} - -static int64_t -hpt_find(kvm_t *kd, uint64_t pa) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) - if (pa == hpte->pa) - return (hpte->off); - - return (-1); -} - static int -inithash(kvm_t *kd, uint32_t *base, int len, off_t off) +_arm_minidump_probe(kvm_t *kd) { - uint64_t idx, pa; - uint32_t bit, bits; - - for (idx = 0; idx < len / sizeof(*base); idx++) { - bits = base[idx]; - while (bits) { - bit = ffs(bits) - 1; - bits &= ~(1ul << bit); - pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE; - 
hpt_insert(kd, pa, off); - off += PAGE_SIZE; - } - } - return (off); + + return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) && + _kvm_is_minidump(kd)); } -void -_kvm_minidump_freevtop(kvm_t *kd) +static void +_arm_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm->bitmap) - free(vm->bitmap); + _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } -int -_kvm_minidump_initvtop(kvm_t *kd) +static int +_arm_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; + uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); @@ -156,7 +90,6 @@ } kd->vmst = vmst; - vmst->minidump = 1; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { @@ -169,34 +102,41 @@ _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } + vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. " "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } + vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); + vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); + vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); + vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase); /* Skip header and msgbuf */ - off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize); + off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize); - vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); - if (vmst->bitmap == NULL) { + bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); + if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "bitmap", vmst->hdr.bitmapsize); return (-1); } - if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) != + if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); + free(bitmap); return (-1); } - off += round_page(vmst->hdr.bitmapsize); + off += arm_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } @@ -204,28 +144,32 @@ (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* Build physical address hash table for sparse pages */ - inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off); + _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, + ARM_PAGE_SIZE, sizeof(*bitmap)); + free(bitmap); return (0); } -int -_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - pt_entry_t pte; - u_long offset, pteindex, a; + arm_pt_entry_t pte; + arm_physaddr_t offset, a; + kvaddr_t pteindex; off_t ofs; - uint32_t *ptemap; + arm_pt_entry_t *ptemap; if (ISALIVE(kd)) { - _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); + _kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!"); return (0); } @@ -233,36 +177,48 @@ ptemap = vm->ptemap; if (va >= vm->hdr.kernbase) { - pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; - pte = ptemap[pteindex]; + pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT; + pte = _kvm32toh(kd, ptemap[pteindex]); if (!pte) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + _kvm_err(kd, kd->program, + 
"_arm_minidump_kvatop: pte not valid"); goto invalid; } - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) { - offset = va & L2_L_OFFSET; - a = pte & L2_L_FRAME; - } else if ((pte & L2_TYPE_MASK) == L2_TYPE_S) { - offset = va & L2_S_OFFSET; - a = pte & L2_S_FRAME; + if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { + offset = va & ARM_L2_L_OFFSET; + a = pte & ARM_L2_L_FRAME; + } else if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_S) { + offset = va & ARM_L2_S_OFFSET; + a = pte & ARM_L2_S_FRAME; } else goto invalid; - ofs = hpt_find(kd, a); + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical " - "address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, "_arm_minidump_kvatop: " + "physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (ARM_PAGE_SIZE - offset); } else - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx " - "not minidumped", va); + _kvm_err(kd, kd->program, "_arm_minidump_kvatop: virtual " + "address 0x%jx not minidumped", (uintmax_t)va); invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } + +struct kvm_arch kvm_arm_minidump = { + .ka_probe = _arm_minidump_probe, + .ka_initvtop = _arm_minidump_initvtop, + .ka_freevtop = _arm_minidump_freevtop, + .ka_kvatop = _arm_minidump_kvatop, + .ka_native = _arm_native, +}; + +KVM_ARCH(kvm_arm_minidump); Index: lib/libkvm/kvm_minidump_i386.c =================================================================== --- lib/libkvm/kvm_minidump_i386.c +++ lib/libkvm/kvm_minidump_i386.c @@ -27,118 +27,57 @@ __FBSDID("$FreeBSD$"); /* - * AMD64 machine dependent routines for kvm and minidumps. + * i386 machine dependent routines for kvm and minidumps. */ #include -#include -#include -#include -#include -#include +#include +#include #include #include #include -#include #include -#include -#include - -#include -#include -#include +#include "../../sys/i386/include/minidump.h" #include #include "kvm_private.h" +#include "kvm_i386.h" -#define PG_FRAME_PAE (~((uint64_t)PAGE_MASK)) - -struct hpte { - struct hpte *next; - uint64_t pa; - int64_t off; -}; - -#define HPT_SIZE 1024 +#define i386_round_page(x) roundup2((kvaddr_t)(x), I386_PAGE_SIZE) -/* minidump must be the first item! 
*/ struct vmstate { - int minidump; /* 1 = minidump mode */ struct minidumphdr hdr; - void *hpt_head[HPT_SIZE]; - uint32_t *bitmap; + struct hpt hpt; void *ptemap; }; -static void -hpt_insert(kvm_t *kd, uint64_t pa, int64_t off) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - hpte = malloc(sizeof(*hpte)); - hpte->pa = pa; - hpte->off = off; - hpte->next = kd->vmst->hpt_head[fnv]; - kd->vmst->hpt_head[fnv] = hpte; -} - -static int64_t -hpt_find(kvm_t *kd, uint64_t pa) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) { - if (pa == hpte->pa) - return (hpte->off); - } - return (-1); -} - static int -inithash(kvm_t *kd, uint32_t *base, int len, off_t off) +_i386_minidump_probe(kvm_t *kd) { - uint64_t idx; - uint32_t bit, bits; - uint64_t pa; - for (idx = 0; idx < len / sizeof(*base); idx++) { - bits = base[idx]; - while (bits) { - bit = bsfl(bits); - bits &= ~(1ul << bit); - pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE; - hpt_insert(kd, pa, off); - off += PAGE_SIZE; - } - } - return (off); + return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_386) && + _kvm_is_minidump(kd)); } -void -_kvm_minidump_freevtop(kvm_t *kd) +static void +_i386_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm->bitmap) - free(vm->bitmap); + _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } -int -_kvm_minidump_initvtop(kvm_t *kd) +static int +_i386_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; + uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); @@ -147,7 +86,6 @@ return (-1); } kd->vmst = vmst; - vmst->minidump = 1; if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); @@ -157,135 +95,166 @@ _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } + vmst->hdr.version = le32toh(vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } + vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize); + vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize); + vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize); + vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase); + vmst->hdr.paemode = le32toh(vmst->hdr.paemode); /* Skip header and msgbuf */ - off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize); + off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize); - vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); - if (vmst->bitmap == NULL) { + bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); + if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize); return (-1); } - if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) != + if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); + free(bitmap); return (-1); } - off += round_page(vmst->hdr.bitmapsize); + off += i386_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) != (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* build physical address hash table for sparse pages */ - inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off); + _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, + I386_PAGE_SIZE, sizeof(*bitmap)); + free(bitmap); return (0); } static int -_kvm_minidump_vatop_pae(kvm_t *kd, u_long va, off_t *pa) +_i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - uint64_t offset; - uint64_t pte; - u_long pteindex; - uint64_t a; + i386_physaddr_pae_t offset; + i386_pte_pae_t pte; + kvaddr_t pteindex; + i386_physaddr_pae_t a; off_t ofs; - uint64_t *ptemap; + i386_pte_pae_t *ptemap; vm = kd->vmst; ptemap = vm->ptemap; - offset = va & (PAGE_SIZE - 1); + offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { - pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; - pte = ptemap[pteindex]; - if ((pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; + pte = le64toh(ptemap[pteindex]); + if ((pte & I386_PG_V) == 0) { + _kvm_err(kd, kd->program, + "_i386_minidump_vatop_pae: pte not valid"); goto invalid; } - a = pte & PG_FRAME_PAE; - ofs = hpt_find(kd, a); + a = pte & I386_PG_FRAME_PAE; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%llx not in minidump", a); + _kvm_err(kd, kd->program, + "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); } else { - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va); + _kvm_err(kd, kd->program, + "_i386_minidump_vatop_pae: virtual address 0x%jx not minidumped", + (uintmax_t)va); goto invalid; } invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } static int -_kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa) 
+_i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - u_long offset; - pt_entry_t pte; - u_long pteindex; - u_long a; + i386_physaddr_t offset; + i386_pte_t pte; + kvaddr_t pteindex; + i386_physaddr_t a; off_t ofs; - uint32_t *ptemap; + i386_pte_t *ptemap; vm = kd->vmst; ptemap = vm->ptemap; - offset = va & (PAGE_SIZE - 1); + offset = va & I386_PAGE_MASK; if (va >= vm->hdr.kernbase) { - pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; - pte = ptemap[pteindex]; - if ((pte & PG_V) == 0) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT; + pte = le32toh(ptemap[pteindex]); + if ((pte & I386_PG_V) == 0) { + _kvm_err(kd, kd->program, + "_i386_minidump_vatop: pte not valid"); goto invalid; } - a = pte & PG_FRAME; - ofs = hpt_find(kd, a); + a = pte & I386_PG_FRAME; + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, + "_i386_minidump_vatop: physical address 0x%jx not in minidump", + (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (I386_PAGE_SIZE - offset); } else { - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx not minidumped", va); + _kvm_err(kd, kd->program, + "_i386_minidump_vatop: virtual address 0x%jx not minidumped", + (uintmax_t)va); goto invalid; } invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } -int -_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { if (ISALIVE(kd)) { - _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); + _kvm_err(kd, 0, "_i386_minidump_kvatop called in live kernel!"); return (0); } if (kd->vmst->hdr.paemode) - return (_kvm_minidump_vatop_pae(kd, va, pa)); + return (_i386_minidump_vatop_pae(kd, va, pa)); else - return (_kvm_minidump_vatop(kd, va, pa)); + return (_i386_minidump_vatop(kd, va, pa)); } + +struct kvm_arch kvm_i386_minidump = { + .ka_probe = _i386_minidump_probe, + .ka_initvtop = _i386_minidump_initvtop, + .ka_freevtop = _i386_minidump_freevtop, + .ka_kvatop = _i386_minidump_kvatop, + .ka_native = _i386_native, +}; + +KVM_ARCH(kvm_i386_minidump); Index: lib/libkvm/kvm_minidump_mips.c =================================================================== --- lib/libkvm/kvm_minidump_mips.c +++ lib/libkvm/kvm_minidump_mips.c @@ -35,112 +35,57 @@ */ #include -#include -#include -#include -#include -#include +#include +#include +#include #include #include #include -#include -#include - -#include -#include -#include -#include -#include - -#include +#include "../../sys/mips/include/cpuregs.h" +#include "../../sys/mips/include/minidump.h" #include "kvm_private.h" +#include "kvm_mips.h" -struct hpte { - struct hpte *next; - uint64_t pa; - int64_t off; -}; - -#define HPT_SIZE 1024 +#define mips_round_page(x) roundup2((kvaddr_t)(x), MIPS_PAGE_SIZE) -/* minidump must be the first field */ struct vmstate { - int minidump; /* 1 = minidump mode */ struct minidumphdr hdr; - void *hpt_head[HPT_SIZE]; - uint32_t *bitmap; + struct hpt hpt; void *ptemap; + int pte_size; }; -static void -hpt_insert(kvm_t *kd, uint64_t pa, int64_t off) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - hpte = malloc(sizeof(*hpte)); - hpte->pa = pa; - hpte->off = off; - hpte->next = 
kd->vmst->hpt_head[fnv]; - kd->vmst->hpt_head[fnv] = hpte; -} - -static int64_t -hpt_find(kvm_t *kd, uint64_t pa) -{ - struct hpte *hpte; - uint32_t fnv = FNV1_32_INIT; - - fnv = fnv_32_buf(&pa, sizeof(pa), fnv); - fnv &= (HPT_SIZE - 1); - for (hpte = kd->vmst->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) - if (pa == hpte->pa) - return (hpte->off); - - return (-1); -} - static int -inithash(kvm_t *kd, uint32_t *base, int len, off_t off) +_mips_minidump_probe(kvm_t *kd) { - uint64_t idx, pa; - uint32_t bit, bits; - - for (idx = 0; idx < len / sizeof(*base); idx++) { - bits = base[idx]; - while (bits) { - bit = ffs(bits) - 1; - bits &= ~(1ul << bit); - pa = (idx * sizeof(*base) * NBBY + bit) * PAGE_SIZE; - hpt_insert(kd, pa, off); - off += PAGE_SIZE; - } - } - return (off); + if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32 && + kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) + return (0); + if (kd->nlehdr.e_machine != EM_MIPS) + return (0); + return (_kvm_is_minidump(kd)); } -void -_kvm_minidump_freevtop(kvm_t *kd) +static void +_mips_minidump_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm->bitmap) - free(vm->bitmap); + _kvm_hpt_free(&vm->hpt); if (vm->ptemap) free(vm->ptemap); free(vm); kd->vmst = NULL; } -int -_kvm_minidump_initvtop(kvm_t *kd) +static int +_mips_minidump_initvtop(kvm_t *kd) { struct vmstate *vmst; + uint32_t *bitmap; off_t off; vmst = _kvm_malloc(kd, sizeof(*vmst)); @@ -150,9 +95,13 @@ } kd->vmst = vmst; - vmst->minidump = 1; - off = lseek(kd->pmfd, 0, SEEK_CUR); + if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 || + kd->nlehdr.e_flags & EF_MIPS_ABI2) + vmst->pte_size = 64; + else + vmst->pte_size = 32; + if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) { _kvm_err(kd, kd->program, "cannot read dump header"); @@ -164,34 +113,43 @@ _kvm_err(kd, kd->program, "not a minidump for this platform"); return (-1); } + vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version); if (vmst->hdr.version != MINIDUMP_VERSION) { _kvm_err(kd, kd->program, "wrong minidump version. 
" "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version); return (-1); } + vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize); + vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize); + vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize); + vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase); + vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase); + vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend); /* Skip header and msgbuf */ - off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize); + off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize); - vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); - if (vmst->bitmap == NULL) { + bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize); + if (bitmap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "bitmap", vmst->hdr.bitmapsize); return (-1); } - if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) != + if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) != (ssize_t)vmst->hdr.bitmapsize) { _kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize); + free(bitmap); return (-1); } - off += round_page(vmst->hdr.bitmapsize); + off += mips_round_page(vmst->hdr.bitmapsize); vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize); if (vmst->ptemap == NULL) { _kvm_err(kd, kd->program, "cannot allocate %d bytes for " "ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } @@ -199,75 +157,139 @@ (ssize_t)vmst->hdr.ptesize) { _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize); + free(bitmap); return (-1); } off += vmst->hdr.ptesize; /* Build physical address hash table for sparse pages */ - inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off); + _kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off, + MIPS_PAGE_SIZE, sizeof(*bitmap)); + free(bitmap); return (0); } -int -_kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; - pt_entry_t pte; - u_long offset, pteindex, a; + uint64_t pte; + mips_physaddr_t offset, a; + kvaddr_t pteindex; off_t ofs; - pt_entry_t *ptemap; + uint32_t *ptemap32; + uint64_t *ptemap64; if (ISALIVE(kd)) { - _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); + _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!"); return (0); } - offset = va & PAGE_MASK; + offset = va & MIPS_PAGE_MASK; /* Operate with page-aligned address */ - va &= ~PAGE_MASK; + va &= ~MIPS_PAGE_MASK; vm = kd->vmst; - ptemap = vm->ptemap; + ptemap32 = vm->ptemap; + ptemap64 = vm->ptemap; -#if defined(__mips_n64) - if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) - a = (MIPS_XKPHYS_TO_PHYS(va)); - else -#endif - if (va >= (u_long)MIPS_KSEG0_START && va < (u_long)MIPS_KSEG0_END) - a = (MIPS_KSEG0_TO_PHYS(va)); - else if (va >= (u_long)MIPS_KSEG1_START && va < (u_long)MIPS_KSEG1_END) - a = (MIPS_KSEG1_TO_PHYS(va)); - else if (va >= vm->hdr.kernbase) { - pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; - pte = ptemap[pteindex]; + if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { + if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) { + a = va & MIPS_XKPHYS_PHYS_MASK; + goto found; + } + if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) { + a = va & MIPS_KSEG0_PHYS_MASK; + goto found; + } + if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) { + a = va & MIPS_KSEG0_PHYS_MASK; + goto found; + } + } else { + if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) { + a = va & MIPS_KSEG0_PHYS_MASK; + goto found; + } + if (va >= 
MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) { + a = va & MIPS_KSEG0_PHYS_MASK; + goto found; + } + } + if (va >= vm->hdr.kernbase) { + pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT; + if (vm->pte_size == 64) { + pte = _kvm64toh(kd, ptemap64[pteindex]); + a = MIPS64_PTE_TO_PA(pte); + } else { + pte = _kvm32toh(kd, ptemap32[pteindex]); + a = MIPS32_PTE_TO_PA(pte); + } if (!pte) { - _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); + _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte " + "not valid"); goto invalid; } - - a = TLBLO_PTE_TO_PA(pte); - } else { - _kvm_err(kd, kd->program, "_kvm_vatop: virtual address 0x%lx " - "not minidumped", va); + _kvm_err(kd, kd->program, "_mips_minidump_kvatop: virtual " + "address 0x%jx not minidumped", (uintmax_t)va); return (0); } - ofs = hpt_find(kd, a); +found: + ofs = _kvm_hpt_find(&vm->hpt, a); if (ofs == -1) { - _kvm_err(kd, kd->program, "_kvm_vatop: physical " - "address 0x%lx not in minidump", a); + _kvm_err(kd, kd->program, "_mips_minidump_kvatop: physical " + "address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; - return (PAGE_SIZE - offset); + return (MIPS_PAGE_SIZE - offset); invalid: - _kvm_err(kd, 0, "invalid address (0x%lx)", va); + _kvm_err(kd, 0, "invalid address (0x%jx)", (uintmax_t)va); return (0); } + +static int +_mips_native(kvm_t *kd) +{ + +#ifdef __mips__ +#ifdef __mips_n64 + if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS64) + return (0); +#else + if (kd->nlehdr.e_ident[EI_CLASS] != ELFCLASS32) + return (0); +#ifdef __mips_n32 + if (!(kd->nlehdr.e_flags & EF_MIPS_ABI2)) + return (0); +#else + if (kd->nlehdr.e_flags & EF_MIPS_ABI2) + return (0); +#endif +#endif +#if _BYTE_ORDER == _LITTLE_ENDIAN + return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB); +#else + return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); +#endif +#else + return (0); +#endif +} + +struct kvm_arch kvm_mips_minidump = { + .ka_probe = _mips_minidump_probe, + .ka_initvtop = _mips_minidump_initvtop, + .ka_freevtop = _mips_minidump_freevtop, + .ka_kvatop = _mips_minidump_kvatop, + .ka_native = _mips_native, +}; + +KVM_ARCH(kvm_mips_minidump); Index: lib/libkvm/kvm_mips.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_mips.h @@ -0,0 +1,93 @@ +/*- + * Copyright (c) 2015 John H. Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __KVM_MIPS_H__ +#define __KVM_MIPS_H__ + +#ifdef __mips__ +#include +#endif + +typedef uint64_t mips_physaddr_t; + +#define MIPS_PAGE_SHIFT 12 +#define MIPS_PAGE_SIZE (1 << MIPS_PAGE_SHIFT) +#define MIPS_PAGE_MASK (MIPS_PAGE_SIZE - 1) + +#define MIPS32_KSEG0_START 0x80000000 +#define MIPS32_KSEG0_END 0x9fffffff +#define MIPS32_KSEG1_START 0xa0000000 +#define MIPS32_KSEG1_END 0xbfffffff +#define MIPS64_KSEG0_START 0xffffffff80000000 +#define MIPS64_KSEG0_END 0xffffffff9fffffff +#define MIPS64_KSEG1_START 0xffffffffa0000000 +#define MIPS64_KSEG1_END 0xffffffffbfffffff + +#define MIPS32_PFN_MASK (0x1FFFFFC0) +#define MIPS64_PFN_MASK 0x3FFFFFFC0 +#define MIPS_PFN_SHIFT (6) + +#define MIPS_PFN_TO_PA(pfn) (((pfn) >> MIPS_PFN_SHIFT) << MIPS_PAGE_SHIFT) +#define MIPS32_PTE_TO_PFN(pte) ((pte) & MIPS32_PFN_MASK) +#define MIPS32_PTE_TO_PA(pte) (MIPS_PFN_TO_PA(MIPS32_PTE_TO_PFN((pte)))) +#define MIPS64_PTE_TO_PFN(pte) ((pte) & MIPS64_PFN_MASK) +#define MIPS64_PTE_TO_PA(pte) (MIPS_PFN_TO_PA(MIPS64_PTE_TO_PFN((pte)))) + +#ifdef __mips__ +_Static_assert(PAGE_SHIFT == MIPS_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == MIPS_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == MIPS_PAGE_MASK, "PAGE_MASK mismatch"); +#ifdef __mips_n64 +_Static_assert((uint64_t)MIPS_KSEG0_START == MIPS64_KSEG0_START, + "MIPS_KSEG0_START mismatch"); +_Static_assert((uint64_t)MIPS_KSEG0_END == MIPS64_KSEG0_END, + "MIPS_KSEG0_END mismatch"); +_Static_assert((uint64_t)MIPS_KSEG1_START == MIPS64_KSEG1_START, + "MIPS_KSEG1_START mismatch"); +_Static_assert((uint64_t)MIPS_KSEG1_END == MIPS64_KSEG1_END, + "MIPS_KSEG1_END mismatch"); +#else +_Static_assert((uint32_t)MIPS_KSEG0_START == MIPS32_KSEG0_START, + "MIPS_KSEG0_START mismatch"); +_Static_assert((uint32_t)MIPS_KSEG0_END == MIPS32_KSEG0_END, + "MIPS_KSEG0_END mismatch"); +_Static_assert((uint32_t)MIPS_KSEG1_START == MIPS32_KSEG1_START, + "MIPS_KSEG1_START mismatch"); +_Static_assert((uint32_t)MIPS_KSEG1_END == MIPS32_KSEG1_END, + "MIPS_KSEG1_END mismatch"); +#endif +#if defined(__mips_n64) || defined(__mips_n32) +_Static_assert(TLBLO_PFN_MASK == MIPS64_PFN_MASK, "TLBLO_PFN_MASK mismatch"); +#else +_Static_assert(TLBLO_PFN_MASK == MIPS32_PFN_MASK, "TLBLO_PFN_MASK mismatch"); +#endif +_Static_assert(TLBLO_PFN_SHIFT == MIPS_PFN_SHIFT, "TLBLO_PFN_SHIFT mismatch"); +_Static_assert(TLB_PAGE_SHIFT == MIPS_PAGE_SHIFT, "TLB_PAGE_SHIFT mismatch"); +#endif + +#endif /* !__KVM_MIPS_H__ */ Index: lib/libkvm/kvm_native.3 =================================================================== --- /dev/null +++ lib/libkvm/kvm_native.3 @@ -0,0 +1,62 @@ +.\" +.\" Copyright (c) 2015 John Baldwin +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. 
+.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" $FreeBSD$ +.\" +.Dd October 30, 2015 +.Dt kvm_native 3 +.Os +.Sh NAME +.Nm kvm_native +.Nd is a kvm descriptor opened on a native kernel image +.Sh LIBRARY +.Lb libkvm +.Sh SYNOPSIS +.In kvm.h +.Ft int +.Fn kvm_native "kvm_t *kd" +.Sh DESCRIPTION +The +.Nm kvm +library provides an interface for accessing kernel virtual memory images +for both native kernel images +.Pq where the ABI of the kernel executable matches the host system +and non-native kernel images. +The +.Fn kvm_native +function returns a non-zero value if the kvm descriptor +.Fa kd +is attached to a native kernel image; +otherwise it returns zero. +.Sh RETURN VALUES +The +.Fn kvm_native +function returns a non-zero value if the kvm descriptor +.Fa kd +is attached to a native kernel image; +otherwise it returns zero. +.Sh SEE ALSO +.Xr kvm 3 , +.Xr kvm_open2 3 Index: lib/libkvm/kvm_nlist.3 =================================================================== --- lib/libkvm/kvm_nlist.3 +++ lib/libkvm/kvm_nlist.3 @@ -36,7 +36,8 @@ .Dt KVM_NLIST 3 .Os .Sh NAME -.Nm kvm_nlist +.Nm kvm_nlist , +.Nm kvm_nlist2 .Nd retrieve symbol table names from a kernel image .Sh LIBRARY .Lb libkvm @@ -45,31 +46,62 @@ .In nlist.h .Ft int .Fn kvm_nlist "kvm_t *kd" "struct nlist *nl" +.Ft int +.Fn kvm_nlist2 "kvm_t *kd" "struct kvm_nlist *nl" .Sh DESCRIPTION The .Fn kvm_nlist function retrieves the symbol table entries indicated by the name list argument .Fa \&nl . This argument points to an array of nlist structures, terminated by -an entry whose n_name field is +an entry whose +.Fa n_name +field is .Dv NULL (see .Xr nlist 3 ) . -Each symbol is looked up using the n_name field, and if found, the -corresponding n_type and n_value fields are filled in. +Each symbol is looked up using the +.Fa n_name +field, and if found, the +corresponding +.Fa n_type +and +.Fa n_value +fields are filled in. These fields are set to 0 if the symbol is not found. .Pp The .Xr kldsym 2 -system call is used to locate the symbol. +system call is used to locate symbols in live kernels. This is a less than perfect emulation of the nlist values but has the advantage of being aware of kernel modules and is reasonably fast. +.Pp +The +.Fn kvm_nlist2 +function retrieves the symbol table entries indicated by the name list argument +.Fa nl . 
+This argument points to an array of +.Vt "struct kvm_nlist" +structures, +terminated by an entry whose +.Fa n_name +field is +.Dv NULL . +These structures are similar to the nlist structures used by +.Fn kvm_nlist +except that the +.Fa n_value +field uses a different type +.Pq Vt kvaddr_t +to avoid truncation when examining non-native kernel images. .Sh RETURN VALUES The .Fn kvm_nlist -function returns the number of invalid entries found. +and +.Fn kvm_nlist2 +functions return the number of invalid entries found. If the kernel symbol table was unreadable, -1 is returned. .Sh SEE ALSO .Xr kldsym 2 , @@ -79,6 +111,7 @@ .Xr kvm_getenvv 3 , .Xr kvm_geterr 3 , .Xr kvm_getprocs 3 , +.Xr kvm_native 3 , .Xr kvm_open 3 , .Xr kvm_openfiles 3 , .Xr kvm_read 3 , Index: lib/libkvm/kvm_open.3 =================================================================== --- lib/libkvm/kvm_open.3 +++ lib/libkvm/kvm_open.3 @@ -37,6 +37,7 @@ .Os .Sh NAME .Nm kvm_open , +.Nm kvm_open2 , .Nm kvm_openfiles , .Nm kvm_close .Nd initialize kernel virtual memory access @@ -48,12 +49,21 @@ .Ft kvm_t * .Fn kvm_open "const char *execfile" "const char *corefile" "const char *swapfile" "int flags" "const char *errstr" .Ft kvm_t * +.Fo kvm_open2 +.Fa "const char *execfile" +.Fa "const char *corefile" +.Fa "int flags" +.Fa "char *errbuf" +.Fa "int (*resolver)(const char *name, kvaddr_t *addr)" +.Fc +.Ft kvm_t * .Fn kvm_openfiles "const char *execfile" "const char *corefile" "const char *swapfile" "int flags" "char *errbuf" .Ft int .Fn kvm_close "kvm_t *kd" .Sh DESCRIPTION The functions -.Fn kvm_open +.Fn kvm_open , +.Fn kvm_open2 , and .Fn kvm_openfiles return a descriptor used to access kernel virtual memory @@ -111,10 +121,13 @@ .Dv O_RDWR are permitted. .Pp -There are two open routines which differ only with respect to -the error mechanism. +The +.Nm kvm +library provides two different error reporting mechanisms. One provides backward compatibility with the SunOS kvm library, while the other provides an improved error reporting framework. +The mechanism used by a descriptor is determined by the function used to +open the descriptor. .Pp The .Fn kvm_open @@ -140,8 +153,10 @@ call. .Pp The +.Fn kvm_open2 +and .Fn kvm_openfiles -function provides +functions provide .Bx style error reporting. Here, error messages are not printed out by the library. @@ -160,25 +175,56 @@ .Fn kvm_geterr cannot be used to get the error message if open fails. Thus, +.Fn kvm_open2 +and .Fn kvm_openfiles will place any error message in the .Fa errbuf argument. This buffer should be _POSIX2_LINE_MAX characters large (from <limits.h>). +.Pp +The +.Fa resolver +argument points to a function used by the +.Nm kvm +library to map symbol names to kernel virtual addresses. +When the +.Fa resolver +function is called, +.Fa name +specifies the requested symbol name. +If the function is able to resolve the name to an address, +the address should be set in +.Fa *addr +and the function should return zero. +If the function is not able to resolve the name to an address, +it should return a non-zero value. +When opening a native kernel image, +.Fa resolver +may be set to +.Dv NULL +to use an internal function to resolve symbol names. +Non-native kernel images +.Pq such as when cross-debugging a crash dump +require a valid +.Fa resolver . .Sh RETURN VALUES The -.Fn kvm_open +.Fn kvm_open , +.Fn kvm_open2 , and .Fn kvm_openfiles -functions both return a descriptor to be used +functions return a descriptor to be used in all subsequent kvm library calls.
The library is fully re-entrant. On failure, .Dv NULL is returned, in which case +.Fn kvm_open2 +and .Fn kvm_openfiles -writes the error message into +write the error message into .Fa errbuf . .Pp The @@ -191,13 +237,14 @@ .Xr kvm_getenvv 3 , .Xr kvm_geterr 3 , .Xr kvm_getprocs 3 , +.Xr kvm_native 3 , .Xr kvm_nlist 3 , .Xr kvm_read 3 , .Xr kvm_write 3 , .Xr kmem 4 , .Xr mem 4 .Sh BUGS -There should not be two open calls. +There should not be three open calls. The ill-defined error semantics of the Sun library and the desire to have a backward-compatible library for Index: lib/libkvm/kvm_pcpu.c =================================================================== --- lib/libkvm/kvm_pcpu.c +++ lib/libkvm/kvm_pcpu.c @@ -216,7 +216,7 @@ static int _kvm_dpcpu_init(kvm_t *kd) { - struct nlist nl[] = { + struct kvm_nlist nl[] = { #define NLIST_START_SET_PCPU 0 { .n_name = "___start_" DPCPU_SETNAME }, #define NLIST_STOP_SET_PCPU 1 @@ -232,6 +232,12 @@ u_int dpcpu_maxcpus; /* + * XXX: This only works for native kernels for now. + */ + if (!kvm_native(kd)) + return (-1); + + /* * Locate and cache locations of important symbols using the internal * version of _kvm_nlist, turning off initialization to avoid * recursion in case of unresolveable symbols. @@ -279,8 +285,8 @@ * Check whether the value is within the dpcpu symbol range and only if so * adjust the offset relative to the current offset. */ -uintptr_t -_kvm_dpcpu_validaddr(kvm_t *kd, uintptr_t value) +kvaddr_t +_kvm_dpcpu_validaddr(kvm_t *kd, kvaddr_t value) { if (value == 0) @@ -319,6 +325,8 @@ kvm_read_zpcpu(kvm_t *kd, u_long base, void *buf, size_t size, int cpu) { + if (!kvm_native(kd)) + return (-1); return (kvm_read(kd, (uintptr_t)(base + sizeof(struct pcpu) * cpu), buf, size)); } Index: lib/libkvm/kvm_powerpc.c =================================================================== --- lib/libkvm/kvm_powerpc.c +++ lib/libkvm/kvm_powerpc.c @@ -34,12 +34,9 @@ #include #include -#include - -#include #include -#include #include +#include #include #include @@ -100,7 +97,7 @@ vm = kd->vmst; - vm->mapsz = PAGE_SIZE; + vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader); vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile"); @@ -130,7 +127,7 @@ vm->mapsz = vm->dmphdrsz + mapsz; vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { - _kvm_err(kd, kd->program, "cannot map corefle headers"); + _kvm_err(kd, kd->program, "cannot map corefile headers"); return (-1); } vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); @@ -138,8 +135,6 @@ return (0); inval: - munmap(vm->map, vm->mapsz); - vm->map = MAP_FAILED; _kvm_err(kd, kd->program, "invalid corefile"); return (-1); } @@ -150,7 +145,7 @@ * 0 when the virtual address is invalid. 
*/ static size_t -powerpc_va2off(kvm_t *kd, u_long va, off_t *ofs) +powerpc_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm = kd->vmst; Elf32_Phdr *ph; @@ -172,48 +167,69 @@ return (be32toh(ph->p_memsz) - (va - be32toh(ph->p_vaddr))); } -void -_kvm_freevtop(kvm_t *kd) +static void +_powerpc_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm == NULL) - return; - - if (vm->eh != MAP_FAILED) { + if (vm->eh != MAP_FAILED) munmap(vm->eh, vm->mapsz); - vm->eh = MAP_FAILED; - } free(vm); kd->vmst = NULL; } -int -_kvm_initvtop(kvm_t *kd) +static int +_powerpc_probe(kvm_t *kd) +{ + + return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_PPC) && + kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); +} + +static int +_powerpc_initvtop(kvm_t *kd) { kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); - if (kd->vmst == NULL) { - _kvm_err(kd, kd->program, "out of virtual memory"); + if (kd->vmst == NULL) return (-1); - } - if (powerpc_maphdrs(kd) == -1) { - free(kd->vmst); - kd->vmst = NULL; + + if (powerpc_maphdrs(kd) == -1) return (-1); - } + return (0); } -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs) +static int +_powerpc_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm; vm = kd->vmst; - if (vm->ph->p_paddr == ~0U) + if (be32toh(vm->ph->p_paddr) == 0xffffffff) return ((int)powerpc_va2off(kd, va, ofs)); _kvm_err(kd, kd->program, "Raw corefile not supported"); return (0); } + +static int +_powerpc_native(kvm_t *kd) +{ + +#if defined(__powerpc__) && !defined(__powerpc64__) + return (1); +#else + return (0); +#endif +} + +struct kvm_arch kvm_powerpc = { + .ka_probe = _powerpc_probe, + .ka_initvtop = _powerpc_initvtop, + .ka_freevtop = _powerpc_freevtop, + .ka_kvatop = _powerpc_kvatop, + .ka_native = _powerpc_native, +}; + +KVM_ARCH(kvm_powerpc); Index: lib/libkvm/kvm_powerpc64.c =================================================================== --- lib/libkvm/kvm_powerpc64.c +++ lib/libkvm/kvm_powerpc64.c @@ -34,12 +34,9 @@ #include #include -#include - -#include #include -#include #include +#include #include #include @@ -100,7 +97,7 @@ vm = kd->vmst; - vm->mapsz = PAGE_SIZE; + vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader); vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { _kvm_err(kd, kd->program, "cannot map corefile"); @@ -130,16 +127,15 @@ vm->mapsz = vm->dmphdrsz + mapsz; vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0); if (vm->map == MAP_FAILED) { - _kvm_err(kd, kd->program, "cannot map corefle headers"); + _kvm_err(kd, kd->program, "cannot map corefile headers"); return (-1); } vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz); - vm->ph = (void *)((uintptr_t)vm->eh + be64toh(vm->eh->e_phoff)); + vm->ph = (void *)((uintptr_t)vm->eh + + (uintptr_t)be64toh(vm->eh->e_phoff)); return (0); inval: - munmap(vm->map, vm->mapsz); - vm->map = MAP_FAILED; _kvm_err(kd, kd->program, "invalid corefile"); return (-1); } @@ -150,7 +146,7 @@ * 0 when the virtual address is invalid. 
*/ static size_t -powerpc64_va2off(kvm_t *kd, u_long va, off_t *ofs) +powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm = kd->vmst; Elf64_Phdr *ph; @@ -172,48 +168,69 @@ return (be64toh(ph->p_memsz) - (va - be64toh(ph->p_vaddr))); } -void -_kvm_freevtop(kvm_t *kd) +static void +_powerpc64_freevtop(kvm_t *kd) { struct vmstate *vm = kd->vmst; - if (vm == NULL) - return; - - if (vm->eh != MAP_FAILED) { + if (vm->eh != MAP_FAILED) munmap(vm->eh, vm->mapsz); - vm->eh = MAP_FAILED; - } free(vm); kd->vmst = NULL; } -int -_kvm_initvtop(kvm_t *kd) +static int +_powerpc64_probe(kvm_t *kd) +{ + + return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) && + kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB); +} + +static int +_powerpc64_initvtop(kvm_t *kd) { kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst)); - if (kd->vmst == NULL) { - _kvm_err(kd, kd->program, "out of virtual memory"); + if (kd->vmst == NULL) return (-1); - } - if (powerpc_maphdrs(kd) == -1) { - free(kd->vmst); - kd->vmst = NULL; + + if (powerpc_maphdrs(kd) == -1) return (-1); - } + return (0); } -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs) +static int +_powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs) { struct vmstate *vm; vm = kd->vmst; - if (vm->ph->p_paddr == ~0UL) + if (be64toh(vm->ph->p_paddr) == 0xffffffffffffffff) return ((int)powerpc64_va2off(kd, va, ofs)); _kvm_err(kd, kd->program, "Raw corefile not supported"); return (0); } + +static int +_powerpc64_native(kvm_t *kd) +{ + +#ifdef __powerpc64__ + return (1); +#else + return (0); +#endif +} + +struct kvm_arch kvm_powerpc64 = { + .ka_probe = _powerpc64_probe, + .ka_initvtop = _powerpc64_initvtop, + .ka_freevtop = _powerpc64_freevtop, + .ka_kvatop = _powerpc64_kvatop, + .ka_native = _powerpc64_native, +}; + +KVM_ARCH(kvm_powerpc64); Index: lib/libkvm/kvm_private.h =================================================================== --- lib/libkvm/kvm_private.h +++ lib/libkvm/kvm_private.h @@ -34,7 +34,22 @@ * $FreeBSD$ */ +#include +#include +#include + +struct kvm_arch { + int (*ka_probe)(kvm_t *); + int (*ka_initvtop)(kvm_t *); + void (*ka_freevtop)(kvm_t *); + int (*ka_kvatop)(kvm_t *, kvaddr_t, off_t *); + int (*ka_native)(kvm_t *); +}; + +#define KVM_ARCH(ka) DATA_SET(kvm_arch, ka) + struct __kvm { + struct kvm_arch *arch; /* * a string to be prepended to error messages * provided for compatibility with sun's interface @@ -46,8 +61,9 @@ #define ISALIVE(kd) ((kd)->vmfd >= 0) int pmfd; /* physical memory file (or crashdump) */ int vmfd; /* virtual memory file (-1 if crashdump) */ - int unused; /* was: swap file (e.g., /dev/drum) */ int nlfd; /* namelist file (e.g., /kernel) */ + GElf_Ehdr nlehdr; /* ELF file header for namelist file */ + int (*resolve_symbol)(const char *, kvaddr_t *); struct kinfo_proc *procbase; char *argspc; /* (dynamic) storage for argv strings */ int arglen; /* length of the above */ @@ -64,10 +80,10 @@ int rawdump; /* raw dump format */ int vnet_initialized; /* vnet fields set up */ - uintptr_t vnet_start; /* start of kernel's vnet region */ - uintptr_t vnet_stop; /* stop of kernel's vnet region */ - uintptr_t vnet_current; /* vnet we're working with */ - uintptr_t vnet_base; /* vnet base of current vnet */ + kvaddr_t vnet_start; /* start of kernel's vnet region */ + kvaddr_t vnet_stop; /* stop of kernel's vnet region */ + kvaddr_t vnet_current; /* vnet we're working with */ + kvaddr_t vnet_base; /* vnet base of current vnet */ /* * Dynamic per-CPU kernel memory. 
We translate symbols, on-demand, @@ -75,38 +91,69 @@ * kvm_dpcpu_setcpu(). */ int dpcpu_initialized; /* dpcpu fields set up */ - uintptr_t dpcpu_start; /* start of kernel's dpcpu region */ - uintptr_t dpcpu_stop; /* stop of kernel's dpcpu region */ + kvaddr_t dpcpu_start; /* start of kernel's dpcpu region */ + kvaddr_t dpcpu_stop; /* stop of kernel's dpcpu region */ u_int dpcpu_maxcpus; /* size of base array */ uintptr_t *dpcpu_off; /* base array, indexed by CPU ID */ u_int dpcpu_curcpu; /* CPU we're currently working with */ - uintptr_t dpcpu_curoff; /* dpcpu base of current CPU */ + kvaddr_t dpcpu_curoff; /* dpcpu base of current CPU */ +}; + +/* + * Page table hash used by minidump backends to map physical addresses + * to file offsets. + */ +struct hpte { + struct hpte *next; + uint64_t pa; + off_t off; +}; + +#define HPT_SIZE 1024 + +struct hpt { + struct hpte *hpt_head[HPT_SIZE]; }; /* * Functions used internally by kvm, but across kvm modules. */ +static inline uint32_t +_kvm32toh(kvm_t *kd, uint32_t val) +{ + + if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB) + return (le32toh(val)); + else + return (be32toh(val)); +} + +static inline uint64_t +_kvm64toh(kvm_t *kd, uint64_t val) +{ + + if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB) + return (le64toh(val)); + else + return (be64toh(val)); +} + void _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...) __printflike(3, 4); void _kvm_freeprocs(kvm_t *kd); -void _kvm_freevtop(kvm_t *); -int _kvm_initvtop(kvm_t *); -int _kvm_kvatop(kvm_t *, u_long, off_t *); void *_kvm_malloc(kvm_t *kd, size_t); -int _kvm_nlist(kvm_t *, struct nlist *, int); +int _kvm_nlist(kvm_t *, struct kvm_nlist *, int); void *_kvm_realloc(kvm_t *kd, void *, size_t); void _kvm_syserr (kvm_t *kd, const char *program, const char *fmt, ...) 
__printflike(3, 4); -int _kvm_uvatop(kvm_t *, const struct proc *, u_long, u_long *); int _kvm_vnet_selectpid(kvm_t *, pid_t); int _kvm_vnet_initialized(kvm_t *, int); -uintptr_t _kvm_vnet_validaddr(kvm_t *, uintptr_t); +kvaddr_t _kvm_vnet_validaddr(kvm_t *, kvaddr_t); int _kvm_dpcpu_initialized(kvm_t *, int); -uintptr_t _kvm_dpcpu_validaddr(kvm_t *, uintptr_t); - -#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \ - defined(__i386__) || defined(__mips__) -void _kvm_minidump_freevtop(kvm_t *); -int _kvm_minidump_initvtop(kvm_t *); -int _kvm_minidump_kvatop(kvm_t *, u_long, off_t *); -#endif +kvaddr_t _kvm_dpcpu_validaddr(kvm_t *, kvaddr_t); +int _kvm_probe_elf_kernel(kvm_t *, int, int); +int _kvm_is_minidump(kvm_t *); +int _kvm_read_core_phdrs(kvm_t *, size_t *, GElf_Phdr **); +void _kvm_hpt_init(kvm_t *, struct hpt *, void *, size_t, off_t, int, int); +off_t _kvm_hpt_find(struct hpt *, uint64_t); +void _kvm_hpt_free(struct hpt *); Index: lib/libkvm/kvm_proc.c =================================================================== --- lib/libkvm/kvm_proc.c +++ lib/libkvm/kvm_proc.c @@ -582,6 +582,12 @@ nl[5].n_name = "_cpu_tick_frequency"; nl[6].n_name = 0; + if (!kd->arch->ka_native(kd)) { + _kvm_err(kd, kd->program, + "cannot read procs from non-native core"); + return (0); + } + if (kvm_nlist(kd, nl) != 0) { for (p = nl; p->n_type != 0; ++p) ; Index: lib/libkvm/kvm_read.3 =================================================================== --- lib/libkvm/kvm_read.3 +++ lib/libkvm/kvm_read.3 @@ -37,6 +37,7 @@ .Os .Sh NAME .Nm kvm_read , +.Nm kvm_read2 , .Nm kvm_write .Nd read or write kernel virtual memory .Sh LIBRARY @@ -46,23 +47,26 @@ .Ft ssize_t .Fn kvm_read "kvm_t *kd" "unsigned long addr" "void *buf" "size_t nbytes" .Ft ssize_t +.Fn kvm_read2 "kvm_t *kd" "kvaddr_t addr" "void *buf" "size_t nbytes" +.Ft ssize_t .Fn kvm_write "kvm_t *kd" "unsigned long addr" "const void *buf" "size_t nbytes" .Sh DESCRIPTION The -.Fn kvm_read +.Fn kvm_read , +.Fn kvm_read2 , and .Fn kvm_write functions are used to read and write kernel virtual memory (or a crash dump file). See .Fn kvm_open 3 -or -.Fn kvm_openfiles 3 for information regarding opening kernel virtual memory and crash dumps. .Pp The .Fn kvm_read -function transfers +and +.Fn kvm_read2 +functions transfer .Fa nbytes bytes of data from the kernel space address @@ -77,6 +81,16 @@ .Fa addr . Unlike their SunOS counterparts, these functions cannot be used to read or write process address spaces. +.Pp +The +.Fn kvm_read2 +function uses a different type +.Pq Vt kvaddr_t +for the +.Fa addr +argument to allow use of addresses larger than +.Dv ULONG_MAX +when examining non-native kernel images. .Sh RETURN VALUES Upon success, the number of bytes actually transferred is returned. Otherwise, -1 is returned. Index: lib/libkvm/kvm_sparc64.h =================================================================== --- /dev/null +++ lib/libkvm/kvm_sparc64.h @@ -0,0 +1,118 @@ +/*- + * Copyright (c) 2015 John H. Baldwin + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef __KVM_SPARC64_H__ +#define __KVM_SPARC64_H__ + +#ifdef __sparc64__ +#include +#include +#include +#include +#include +#endif + +#define SPARC64_PAGE_SHIFT 13 +#define SPARC64_PAGE_SIZE (1 << SPARC64_PAGE_SHIFT) +#define SPARC64_PAGE_MASK (SPARC64_PAGE_SIZE - 1) + +#define SPARC64_MIN_DIRECT_ADDRESS (0xfffff80000000000) + +#define SPARC64_DIRECT_ADDRESS_BITS (43) +#define SPARC64_DIRECT_ADDRESS_MASK \ + (((uint64_t)1 << SPARC64_DIRECT_ADDRESS_BITS) - 1) + +#define SPARC64_DIRECT_TO_PHYS(va) ((va) & SPARC64_DIRECT_ADDRESS_MASK) + +#define SPARC64_TTE_SHIFT (5) + +#define SPARC64_TD_SIZE_SHIFT (61) +#define SPARC64_TD_PA_SHIFT (13) + +#define SPARC64_TD_SIZE_BITS (2) +#define SPARC64_TD_PA_CH_BITS (30) /* US-III{,i,+}, US-IV{,+}, SPARC64 V */ +#define SPARC64_TD_PA_BITS SPARC64_TD_PA_CH_BITS + +#define SPARC64_TD_SIZE_MASK (((uint64_t)1 << SPARC64_TD_SIZE_BITS) - 1) +#define SPARC64_TD_PA_MASK (((uint64_t)1 << SPARC64_TD_PA_BITS) - 1) + +#define SPARC64_TD_V ((uint64_t)1 << 63) + +#define SPARC64_TV_SIZE_BITS (SPARC64_TD_SIZE_BITS) +#define SPARC64_TV_VPN(va, sz) \ + ((((va) >> SPARC64_TTE_PAGE_SHIFT(sz)) << SPARC64_TV_SIZE_BITS) | sz) + +#define SPARC64_TTE_SIZE_SPREAD (3) +#define SPARC64_TTE_PAGE_SHIFT(sz) \ + (SPARC64_PAGE_SHIFT + ((sz) * SPARC64_TTE_SIZE_SPREAD)) + +#define SPARC64_TTE_GET_SIZE(tp) \ + (((tp)->tte_data >> SPARC64_TD_SIZE_SHIFT) & SPARC64_TD_SIZE_MASK) + +#define SPARC64_TTE_GET_PA(tp) \ + ((tp)->tte_data & (SPARC64_TD_PA_MASK << SPARC64_TD_PA_SHIFT)) + +struct sparc64_tte { + uint64_t tte_vpn; + uint64_t tte_data; +}; + +static __inline int +sparc64_tte_match(struct sparc64_tte *tp, kvaddr_t va) +{ + + return (((tp->tte_data & SPARC64_TD_V) != 0) && + (tp->tte_vpn == SPARC64_TV_VPN(va, SPARC64_TTE_GET_SIZE(tp)))); +} + +#ifdef __sparc64__ +_Static_assert(PAGE_SHIFT == SPARC64_PAGE_SHIFT, "PAGE_SHIFT mismatch"); +_Static_assert(PAGE_SIZE == SPARC64_PAGE_SIZE, "PAGE_SIZE mismatch"); +_Static_assert(PAGE_MASK == SPARC64_PAGE_MASK, "PAGE_MASK mismatch"); +_Static_assert(VM_MIN_DIRECT_ADDRESS == SPARC64_MIN_DIRECT_ADDRESS, + "VM_MIN_DIRECT_ADDRESS mismatch"); +_Static_assert(TLB_DIRECT_ADDRESS_BITS == SPARC64_DIRECT_ADDRESS_BITS, + "TLB_DIRECT_ADDRESS_BITS mismatch"); +_Static_assert(TLB_DIRECT_ADDRESS_MASK == SPARC64_DIRECT_ADDRESS_MASK, + "TLB_DIRECT_ADDRESS_MASK mismatch"); +_Static_assert(TTE_SHIFT == SPARC64_TTE_SHIFT, "TTE_SHIFT mismatch"); +_Static_assert(TD_SIZE_SHIFT == SPARC64_TD_SIZE_SHIFT, + "TD_SIZE_SHIFT mismatch"); +_Static_assert(TD_PA_SHIFT == 
SPARC64_TD_PA_SHIFT, + "TD_PA_SHIFT mismatch"); +_Static_assert(TD_SIZE_BITS == SPARC64_TD_SIZE_BITS, "TD_SIZE_BITS mismatch"); +_Static_assert(TD_PA_BITS == SPARC64_TD_PA_BITS, "TD_PA_BITS mismatch"); +_Static_assert(TD_SIZE_MASK == SPARC64_TD_SIZE_MASK, "TD_SIZE_MASK mismatch"); +_Static_assert(TD_PA_MASK == SPARC64_TD_PA_MASK, "TD_PA_MASK mismatch"); +_Static_assert(TD_V == SPARC64_TD_V, "TD_V mismatch"); +_Static_assert(TV_SIZE_BITS == SPARC64_TV_SIZE_BITS, "TV_SIZE_BITS mismatch"); +_Static_assert(TTE_SIZE_SPREAD == SPARC64_TTE_SIZE_SPREAD, + "TTE_SIZE_SPREAD mismatch"); +#endif + +#endif /* !__KVM_SPARC64_H__ */ Index: lib/libkvm/kvm_sparc64.c =================================================================== --- lib/libkvm/kvm_sparc64.c +++ lib/libkvm/kvm_sparc64.c @@ -47,65 +47,54 @@ */ #include -#include -#include -#include +#include +#include +#include #include #include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include +#include "../../sys/sparc64/include/kerneldump.h" #include "kvm_private.h" - -#ifndef btop -#define btop(x) (sparc64_btop(x)) -#define ptob(x) (sparc64_ptob(x)) -#endif +#include "kvm_sparc64.h" struct vmstate { off_t vm_tsb_off; - vm_size_t vm_tsb_mask; + uint64_t vm_tsb_mask; int vm_nregions; struct sparc64_dump_reg *vm_regions; }; -void -_kvm_freevtop(kvm_t *kd) +static int +_sparc64_probe(kvm_t *kd) { - if (kd->vmst != 0) { - free(kd->vmst->vm_regions); - free(kd->vmst); - } + + return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_SPARCV9)); +} + +static void +_sparc64_freevtop(kvm_t *kd) +{ + + free(kd->vmst->vm_regions); + free(kd->vmst); + kd->vmst = NULL; } static int -_kvm_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size) +_sparc64_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size) { /* XXX This has to be a raw file read, kvm_read is virtual. 
*/ - if (lseek(kd->pmfd, pos, SEEK_SET) == -1) { - _kvm_syserr(kd, kd->program, "_kvm_read_phys: lseek"); - return (0); - } - if (read(kd->pmfd, buf, size) != (ssize_t)size) { - _kvm_syserr(kd, kd->program, "_kvm_read_phys: read"); + if (pread(kd->pmfd, buf, size, pos) != (ssize_t)size) { + _kvm_syserr(kd, kd->program, "_sparc64_read_phys: pread"); return (0); } return (1); } static int -_kvm_reg_cmp(const void *a, const void *b) +_sparc64_reg_cmp(const void *a, const void *b) { const struct sparc64_dump_reg *ra, *rb; @@ -122,14 +111,14 @@ #define KVM_OFF_NOTFOUND 0 static off_t -_kvm_find_off(struct vmstate *vm, vm_offset_t pa, vm_size_t size) +_sparc64_find_off(struct vmstate *vm, uint64_t pa, uint64_t size) { struct sparc64_dump_reg *reg, key; vm_offset_t o; key.dr_pa = pa; reg = bsearch(&key, vm->vm_regions, vm->vm_nregions, - sizeof(*vm->vm_regions), _kvm_reg_cmp); + sizeof(*vm->vm_regions), _sparc64_reg_cmp); if (reg == NULL) return (KVM_OFF_NOTFOUND); o = pa - reg->dr_pa; @@ -138,14 +127,15 @@ return (reg->dr_offs + o); } -int -_kvm_initvtop(kvm_t *kd) +static int +_sparc64_initvtop(kvm_t *kd) { struct sparc64_dump_hdr hdr; struct sparc64_dump_reg *regs; struct vmstate *vm; size_t regsz; - vm_offset_t pa; + uint64_t pa; + int i; vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm)); if (vm == NULL) { @@ -154,8 +144,13 @@ } kd->vmst = vm; - if (!_kvm_read_phys(kd, 0, &hdr, sizeof(hdr))) + if (!_sparc64_read_phys(kd, 0, &hdr, sizeof(hdr))) goto fail_vm; + hdr.dh_hdr_size = be64toh(hdr.dh_hdr_size); + hdr.dh_tsb_pa = be64toh(hdr.dh_tsb_pa); + hdr.dh_tsb_size = be64toh(hdr.dh_tsb_size); + hdr.dh_tsb_mask = be64toh(hdr.dh_tsb_mask); + hdr.dh_nregions = be32toh(hdr.dh_nregions); pa = hdr.dh_tsb_pa; regsz = hdr.dh_nregions * sizeof(*regs); @@ -164,14 +159,19 @@ _kvm_err(kd, kd->program, "cannot allocate regions"); goto fail_vm; } - if (!_kvm_read_phys(kd, sizeof(hdr), regs, regsz)) + if (!_sparc64_read_phys(kd, sizeof(hdr), regs, regsz)) goto fail_regs; - qsort(regs, hdr.dh_nregions, sizeof(*regs), _kvm_reg_cmp); + for (i = 0; i < hdr.dh_nregions; i++) { + regs[i].dr_pa = be64toh(regs[i].dr_pa); + regs[i].dr_size = be64toh(regs[i].dr_size); + regs[i].dr_offs = be64toh(regs[i].dr_offs); + } + qsort(regs, hdr.dh_nregions, sizeof(*regs), _sparc64_reg_cmp); vm->vm_tsb_mask = hdr.dh_tsb_mask; vm->vm_regions = regs; vm->vm_nregions = hdr.dh_nregions; - vm->vm_tsb_off = _kvm_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size); + vm->vm_tsb_off = _sparc64_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size); if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) { _kvm_err(kd, kd->program, "tsb not found in dump"); goto fail_regs; @@ -185,37 +185,60 @@ return (-1); } -int -_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa) +static int +_sparc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { - struct tte tte; + struct sparc64_tte tte; off_t tte_off; - u_long vpn; + kvaddr_t vpn; off_t pa_off; - u_long pg_off; + kvaddr_t pg_off; int rest; - pg_off = va & PAGE_MASK; - if (va >= VM_MIN_DIRECT_ADDRESS) - pa_off = TLB_DIRECT_TO_PHYS(va) & ~PAGE_MASK; + pg_off = va & SPARC64_PAGE_MASK; + if (va >= SPARC64_MIN_DIRECT_ADDRESS) + pa_off = SPARC64_DIRECT_TO_PHYS(va) & ~SPARC64_PAGE_MASK; else { - vpn = btop(va); + vpn = va >> SPARC64_PAGE_SHIFT; tte_off = kd->vmst->vm_tsb_off + - ((vpn & kd->vmst->vm_tsb_mask) << TTE_SHIFT); - if (!_kvm_read_phys(kd, tte_off, &tte, sizeof(tte))) + ((vpn & kd->vmst->vm_tsb_mask) << SPARC64_TTE_SHIFT); + if (!_sparc64_read_phys(kd, tte_off, &tte, sizeof(tte))) goto invalid; - if (!tte_match(&tte, va)) + tte.tte_vpn = 
be64toh(tte.tte_vpn); + tte.tte_data = be64toh(tte.tte_data); + if (!sparc64_tte_match(&tte, va)) goto invalid; - pa_off = TTE_GET_PA(&tte); + pa_off = SPARC64_TTE_GET_PA(&tte); } - rest = PAGE_SIZE - pg_off; - pa_off = _kvm_find_off(kd->vmst, pa_off, rest); + rest = SPARC64_PAGE_SIZE - pg_off; + pa_off = _sparc64_find_off(kd->vmst, pa_off, rest); if (pa_off == KVM_OFF_NOTFOUND) goto invalid; *pa = pa_off + pg_off; return (rest); invalid: - _kvm_err(kd, 0, "invalid address (%lx)", va); + _kvm_err(kd, 0, "invalid address (%jx)", (uintmax_t)va); return (0); } + +static int +_sparc64_native(kvm_t *kd) +{ + +#ifdef __sparc64__ + return (1); +#else + return (0); +#endif +} + +struct kvm_arch kvm_sparc64 = { + .ka_probe = _sparc64_probe, + .ka_initvtop = _sparc64_initvtop, + .ka_freevtop = _sparc64_freevtop, + .ka_kvatop = _sparc64_kvatop, + .ka_native = _sparc64_native, +}; + +KVM_ARCH(kvm_sparc64); Index: lib/libkvm/kvm_vnet.c =================================================================== --- lib/libkvm/kvm_vnet.c +++ lib/libkvm/kvm_vnet.c @@ -43,7 +43,6 @@ #include -#include #include #include #include @@ -62,7 +61,7 @@ struct ucred cred; struct prison prison; struct vnet vnet; - struct nlist nl[] = { + struct kvm_nlist nl[] = { /* * Note: kvm_nlist strips the first '_' so add an extra one * here to __{start,stop}_set_vnet. @@ -90,6 +89,12 @@ lwpid_t dumptid; /* + * XXX: This only works for native kernels for now. + */ + if (!kvm_native(kd)) + return (-1); + + /* * Locate and cache locations of important symbols * using the internal version of _kvm_nlist, turning * off initialization to avoid recursion in case of @@ -204,7 +209,7 @@ /* * Check whether the vnet module has been initialized sucessfully - * or not, intialize it if permitted. + * or not, initialize it if permitted. */ int _kvm_vnet_initialized(kvm_t *kd, int intialize) @@ -222,8 +227,8 @@ * Check whether the value is within the vnet symbol range and * only if so adjust the offset relative to the current base. */ -uintptr_t -_kvm_vnet_validaddr(kvm_t *kd, uintptr_t value) +kvaddr_t +_kvm_vnet_validaddr(kvm_t *kd, kvaddr_t value) { if (value == 0) Index: rescue/rescue/Makefile =================================================================== --- rescue/rescue/Makefile +++ rescue/rescue/Makefile @@ -53,7 +53,7 @@ ed expr getfacl hostname kenv kill ln ls mkdir mv \ pkill ps pwd realpath rm rmdir setfacl sh sleep stty \ sync test -CRUNCH_LIBS+= -lcrypt -ledit -ljail -lkvm -ll -ltermcapw -lutil -lxo +CRUNCH_LIBS+= -lcrypt -ledit -ljail -lkvm -lelf -ll -ltermcapw -lutil -lxo CRUNCH_BUILDTOOLS+= bin/sh # Additional options for specific programs Index: share/mk/src.libnames.mk =================================================================== --- share/mk/src.libnames.mk +++ share/mk/src.libnames.mk @@ -180,6 +180,7 @@ _DP_cam= sbuf _DP_casper= capsicum nv pjdlog _DP_capsicum= nv +_DP_kvm= elf _DP_pjdlog= util _DP_opie= md _DP_usb= pthread Index: sys/sparc64/include/kerneldump.h =================================================================== --- sys/sparc64/include/kerneldump.h +++ sys/sparc64/include/kerneldump.h @@ -29,9 +29,9 @@ #define _MACHINE_KERNELDUMP_H_ struct sparc64_dump_reg { - vm_paddr_t dr_pa; - vm_offset_t dr_size; - vm_offset_t dr_offs; + uint64_t dr_pa; + uint64_t dr_size; + uint64_t dr_offs; }; /* @@ -40,11 +40,12 @@ * would require some ugly hacks. 
*/ struct sparc64_dump_hdr { - vm_offset_t dh_hdr_size; - vm_paddr_t dh_tsb_pa; - vm_size_t dh_tsb_size; - vm_size_t dh_tsb_mask; - int dh_nregions; + uint64_t dh_hdr_size; + uint64_t dh_tsb_pa; + uint64_t dh_tsb_size; + uint64_t dh_tsb_mask; + int32_t dh_nregions; + int32_t dh_pad; struct sparc64_dump_reg dh_regions[]; };
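
A few illustrative sketches follow; none of them are part of the patch. First, the resolver contract documented in the kvm_open.3 hunk is small enough to show end to end. This is a minimal sketch of opening a (possibly non-native) vmcore with kvm_open2(); the symbol name and address in the resolver are invented placeholders, where a real consumer such as kgdb's kgdb_resolve_symbol() in trgt.c above would consult the target kernel's symbol table instead:

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch resolver: map a symbol name to a kernel virtual address.
 * The single hard-coded entry is a placeholder; real addresses would
 * come from the target kernel's symbol table.
 */
static int
sketch_resolver(const char *name, kvaddr_t *addr)
{

	if (strcmp(name, "kernbase") == 0) {
		*addr = 0xffffffff80000000ULL;	/* invented address */
		return (0);			/* zero: resolved */
	}
	return (1);				/* non-zero: not found */
}

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s kernel vmcore\n", argv[0]);
		return (1);
	}
	kd = kvm_open2(argv[1], argv[2], O_RDONLY, errbuf, sketch_resolver);
	if (kd == NULL) {
		/* kvm_open2() reports errors via errbuf, BSD-style. */
		fprintf(stderr, "kvm_open2: %s\n", errbuf);
		return (1);
	}
	printf("image is %snative\n", kvm_native(kd) ? "" : "non-");
	kvm_close(kd);
	return (0);
}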
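In the same spirit, a sketch of the kvm_nlist2()/kvm_read2() pair from the man page hunks above, assuming kd was opened as in the previous sketch. "_hz" follows the leading-underscore convention used elsewhere in this patch (compare "_cpu_tick_frequency" in kvm_proc.c); byte order of the value read remains the caller's problem for non-native images:

#include <sys/types.h>
#include <kvm.h>
#include <stdio.h>

static int
print_hz(kvm_t *kd)
{
	struct kvm_nlist nl[] = {
		{ .n_name = "_hz" },
		{ .n_name = NULL }	/* NULL name terminates the list */
	};
	int hz;

	/* kvm_nlist2() returns the number of entries it could not find. */
	if (kvm_nlist2(kd, nl) != 0)
		return (-1);
	/*
	 * n_value is a kvaddr_t, so this works even when the target
	 * image has wider pointers than the host.  For a non-native
	 * image the caller would still need to byte-swap hz.
	 */
	if (kvm_read2(kd, nl[0].n_value, &hz, sizeof(hz)) !=
	    (ssize_t)sizeof(hz))
		return (-1);
	printf("hz = %d\n", hz);
	return (0);
}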
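The struct kvm_arch vtable and KVM_ARCH() linker set added to kvm_private.h make each backend self-registering: on open, the library can walk the set and let the first ka_probe() that matches claim the image. A skeleton of the pattern with every "foo" name invented; a real backend fills in vmstate setup and address translation the way kvm_powerpc.c and kvm_sparc64.c do above:

#include "kvm_private.h"	/* struct kvm_arch, KVM_ARCH(), prototypes */

static int
_foo_probe(kvm_t *kd)
{

	/* Claim the image when the kernel's ELF header matches. */
	return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_NONE /* placeholder */));
}

static int
_foo_initvtop(kvm_t *kd)
{

	/* Allocate kd->vmst and map the dump headers here. */
	return (0);
}

static void
_foo_freevtop(kvm_t *kd)
{

	/* Undo whatever _foo_initvtop() set up. */
}

static int
_foo_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
{

	/*
	 * Translate a kernel virtual address to a core-file offset and
	 * return the number of contiguous valid bytes at that offset;
	 * 0 means the address is invalid.
	 */
	return (0);
}

static int
_foo_native(kvm_t *kd)
{

	/* Non-zero only when this backend matches the host ABI. */
	return (0);
}

struct kvm_arch kvm_foo = {
	.ka_probe = _foo_probe,
	.ka_initvtop = _foo_initvtop,
	.ka_freevtop = _foo_freevtop,
	.ka_kvatop = _foo_kvatop,
	.ka_native = _foo_native,
};

KVM_ARCH(kvm_foo);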
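Back in kvm_mips.h, the PTE-to-PA macros are worth a worked example: the page frame number sits at bit 6 (MIPS_PFN_SHIFT), so converting a PTE to a byte address means masking out the PFN field, shifting it back down, and shifting up by the 4 KB page shift. A self-contained editorial check, using an arbitrary PFN of 0x1234:

#include <assert.h>
#include <stdint.h>

/* Copied from the kvm_mips.h hunk above. */
#define MIPS_PAGE_SHIFT		12
#define MIPS32_PFN_MASK		(0x1FFFFFC0)
#define MIPS_PFN_SHIFT		(6)
#define MIPS_PFN_TO_PA(pfn)	(((pfn) >> MIPS_PFN_SHIFT) << MIPS_PAGE_SHIFT)
#define MIPS32_PTE_TO_PFN(pte)	((pte) & MIPS32_PFN_MASK)
#define MIPS32_PTE_TO_PA(pte)	(MIPS_PFN_TO_PA(MIPS32_PTE_TO_PFN((pte))))

int
main(void)
{
	/* PFN 0x1234 encoded at bit 6; expect byte address 0x1234 << 12. */
	uint64_t pte = (uint64_t)0x1234 << MIPS_PFN_SHIFT;

	assert(MIPS32_PTE_TO_PA(pte) == 0x1234000);
	return (0);
}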
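Finally, the direct-map fast path in _sparc64_kvatop(): virtual addresses at or above SPARC64_MIN_DIRECT_ADDRESS are direct-mapped, so the physical address is simply the low 43 bits of the VA, and only other addresses go through the dumped TSB. A worked editorial check of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* Copied from the kvm_sparc64.h hunk above. */
#define SPARC64_MIN_DIRECT_ADDRESS	(0xfffff80000000000)
#define SPARC64_DIRECT_ADDRESS_BITS	(43)
#define SPARC64_DIRECT_ADDRESS_MASK \
	(((uint64_t)1 << SPARC64_DIRECT_ADDRESS_BITS) - 1)
#define SPARC64_DIRECT_TO_PHYS(va)	((va) & SPARC64_DIRECT_ADDRESS_MASK)

int
main(void)
{
	/* A direct-mapped VA pointing at physical address 0x12345678. */
	uint64_t va = SPARC64_MIN_DIRECT_ADDRESS + 0x12345678;

	assert(va >= SPARC64_MIN_DIRECT_ADDRESS);
	assert(SPARC64_DIRECT_TO_PHYS(va) == 0x12345678);
	return (0);
}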