Index: lib/libkvm/kvm_minidump_arm.c
===================================================================
--- lib/libkvm/kvm_minidump_arm.c
+++ lib/libkvm/kvm_minidump_arm.c
@@ -242,12 +242,25 @@
 	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
 		offset = va & L2_L_OFFSET;
 		a = pte & L2_L_FRAME;
+#if ARM_ARCH_6 || ARM_ARCH_7A
+	/*
+	 * On armv6/armv7 there is no support for 1kB pages.
+	 * The last bit might be No-Execute, so check it.
+	 */
+	} else if (((pte & L2_TYPE_MASK) == L2_TYPE_S) ||
+	    ((pte & L2_TYPE_MASK) == (L2_TYPE_S | L2_XN))) {
+#else
 	} else if ((pte & L2_TYPE_MASK) == L2_TYPE_S) {
+#endif
 		offset = va & L2_S_OFFSET;
 		a = pte & L2_S_FRAME;
 	} else
 		goto invalid;
 
+	/* We may be inside a superpage; convert to the 4kB frame. */
+	a = a + (offset & ~PAGE_MASK);
+	offset = offset & PAGE_MASK;
+
 	ofs = hpt_find(kd, a);
 	if (ofs == -1) {
 		_kvm_err(kd, kd->program, "_kvm_vatop: physical "
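A note on the duplicated L2_TYPE_S test added above (and repeated in
minidump_machdep.c below): on armv6/armv7 the Execute-Never bit of a
small-page descriptor occupies bit 0, which falls inside the field selected
by L2_TYPE_MASK, so a single compare against L2_TYPE_S would misclassify
every no-execute page as unmapped. A minimal userland sketch of the test;
the constant values are assumptions mirroring sys/arm/include/pte.h, not
part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values, mirroring sys/arm/include/pte.h. */
    #define L2_TYPE_MASK 0x03u  /* low bits select the descriptor type */
    #define L2_TYPE_S    0x02u  /* 4kB small page */
    #define L2_XN        0x01u  /* armv6/v7 Execute-Never, overlaps bit 0 */

    /* Accept both the executable and the no-execute small-page forms. */
    static int
    is_small_page(uint32_t pte)
    {
        return ((pte & L2_TYPE_MASK) == L2_TYPE_S ||
            (pte & L2_TYPE_MASK) == (L2_TYPE_S | L2_XN));
    }

    int
    main(void)
    {
        printf("%d %d\n", is_small_page(0x2f671002),  /* executable: 1 */
            is_small_page(0x2f671003));               /* no-execute: 1 */
        return (0);
    }

The other hunk above serves superpages: folding the large-page offset into
the frame address (a = a + (offset & ~PAGE_MASK)) lets hpt_find() keep
operating in 4kB units.
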
Index: sys/arm/arm/minidump_machdep.c
===================================================================
--- sys/arm/arm/minidump_machdep.c
+++ sys/arm/arm/minidump_machdep.c
@@ -42,6 +42,7 @@
 #include
 #endif
 #include
+#include
 #include
 #include
 #include
@@ -49,6 +50,10 @@
 #include
 #include
 #include
+#include
+#ifdef ARM_NEW_PMAP
+#include
+#endif
 
 CTASSERT(sizeof(struct kerneldumpheader) == 512);
@@ -61,8 +66,6 @@
 uint32_t *vm_page_dump;
 int vm_page_dump_size;
 
-#ifndef ARM_NEW_PMAP
-
 static struct kerneldumpheader kdh;
 static off_t dumplo;
@@ -199,6 +202,8 @@
 /* A fake page table page, to avoid having to handle both 4K and 2M pages */
 static pt_entry_t fakept[NPTEPG];
 
+#ifndef ARM_NEW_PMAP
+
 int
 minidumpsys(struct dumperinfo *di)
 {
@@ -260,7 +265,16 @@
 				k++;
 				pa += PAGE_SIZE;
 			}
+#if ARM_ARCH_6 || ARM_ARCH_7A
+		/*
+		 * On armv6/armv7 there is no support for 1kB pages.
+		 * The last bit might be No-Execute, so check it.
+		 */
+		} else if (((pt[k] & L2_TYPE_MASK) == L2_TYPE_S) ||
+		    ((pt[k] & L2_TYPE_MASK) == (L2_TYPE_S | L2_XN))) {
+#else
 		} else if ((pt[k] & L2_TYPE_MASK) == L2_TYPE_S) {
+#endif
 			pa = (pt[k] & L2_S_FRAME) |
 			    (va & L2_S_OFFSET);
 			if (is_dumpable(pa))
@@ -484,10 +498,283 @@
 int
 minidumpsys(struct dumperinfo *di)
 {
+	struct minidumphdr mdhdr;
+	uint64_t dumpsize;
+	uint32_t ptesize;
+	uint32_t bits;
+	uint32_t pa, prev_pa = 0, count = 0;
+	vm_offset_t va;
+	pt1_entry_t *pte1p = NULL;
+	pt2_entry_t *pte2p = NULL;
+	pt2_entry_t *pte2p_pt;
+	int i, k, bit, error;
+	char *addr;
 
-	return (0);
+	/*
+	 * Flush caches.  Note that in the SMP case this operates only on the
+	 * current CPU's L1 cache.  Before we reach this point, code in either
+	 * the system shutdown or kernel debugger has called stop_cpus() to
+	 * stop all cores other than this one.  Part of the ARM handling of
+	 * stop_cpus() is to call wbinv_all() on that core's local L1 cache.
+	 * So by the time we get here, all that remains is to flush the L1 for
+	 * the current CPU, then the L2.
+	 */
+	cpu_idcache_wbinv_all();
+	cpu_l2cache_wbinv_all();
+
+	counter = 0;
+	/* Walk page table pages, set bits in vm_page_dump. */
+	ptesize = 0;
+	for (va = KERNBASE; va < kernel_vm_end; va += NBPDR) {
+		/*
+		 * We always write a page, even if it is zero.  Each
+		 * page written corresponds to 1MB of space.
+		 */
+		ptesize += L2_TABLE_SIZE;
+		pmap_get_pt1_pt2(pmap_kernel(), va, &pte1p, &pte2p);
+		if (pte1_is_valid(*pte1p) && pte1_is_section(*pte1p)) {
+			/* This is a 1MB section mapping. */
+			pa = (*pte1p & L1_S_ADDR_MASK) |
+			    (va & ~L1_S_ADDR_MASK);
+			for (k = 0; k < (L1_S_SIZE / PAGE_SIZE); k++) {
+				if (is_dumpable(pa))
+					dump_add_page(pa);
+				pa += PAGE_SIZE;
+			}
+			continue;
+		}
+		if (pte1_is_valid(*pte1p) && pte1_is_link(*pte1p)) {
+			/* Set bit for each valid page in this 1MB block. */
+			addr = pmap_kenter_temporary(*pte1p & L1_C_ADDR_MASK,
+			    0);
+			pte2p_pt = (pt2_entry_t *)(addr +
+			    (((uint32_t)*pte1p & L1_C_ADDR_MASK) & PAGE_MASK));
+			for (k = 0; k < 256; k++) {
+				if ((pte2p_pt[k] & L2_TYPE_MASK) == L2_TYPE_L) {
+					pa = (pte2p_pt[k] & L2_L_FRAME) |
+					    (va & L2_L_OFFSET);
+					for (i = 0; i < 16; i++) {
+						if (is_dumpable(pa))
+							dump_add_page(pa);
+						k++;
+						pa += PAGE_SIZE;
+					}
+				} else if (((pte2p_pt[k] & L2_TYPE_MASK) ==
+				    L2_TYPE_S) ||
+				    ((pte2p_pt[k] & L2_TYPE_MASK) ==
+				    (L2_TYPE_S | L2_XN))) {
+					pa = (pte2p_pt[k] & L2_S_FRAME) |
+					    (va & L2_S_OFFSET);
+					if (is_dumpable(pa))
+						dump_add_page(pa);
+				}
+			}
+		} else {
+			/* Nothing, we are going to dump a null page. */
+		}
+	}
+
+	/* Calculate dump size. */
+	dumpsize = ptesize;
+	dumpsize += round_page(msgbufp->msg_size);
+	dumpsize += round_page(vm_page_dump_size);
+
+	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
+		bits = vm_page_dump[i];
+		while (bits) {
+			bit = ffs(bits) - 1;
+			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
+			    bit) * PAGE_SIZE;
+			/* Clear out undumpable pages now if needed. */
+			if (is_dumpable(pa))
+				dumpsize += PAGE_SIZE;
+			else
+				dump_drop_page(pa);
+			bits &= ~(1ul << bit);
+		}
+	}
+
+	dumpsize += PAGE_SIZE;
+
+	/* Determine dump offset on device. */
+	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
+		error = ENOSPC;
+		goto fail;
 	}
+	dumplo = di->mediaoffset + di->mediasize - dumpsize;
+	dumplo -= sizeof(kdh) * 2;
+	progress = dumpsize;
+
+	/* Initialize mdhdr. */
+	bzero(&mdhdr, sizeof(mdhdr));
+	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
+	mdhdr.version = MINIDUMP_VERSION;
+	mdhdr.msgbufsize = msgbufp->msg_size;
+	mdhdr.bitmapsize = vm_page_dump_size;
+	mdhdr.ptesize = ptesize;
+	mdhdr.kernbase = KERNBASE;
+
+	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION, dumpsize,
+	    di->blocksize);
+
+	printf("Physical memory: %ju MB\n", ptoa((uintmax_t)physmem) / 1048576);
+	printf("Dumping %ju MB:", (uintmax_t)dumpsize >> 20);
+
+	/* Dump leader. */
+	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
+	if (error)
+		goto fail;
+	dumplo += sizeof(kdh);
+
+	/* Dump my header. */
+	bzero(&fakept, sizeof(fakept));
+	bcopy(&mdhdr, &fakept, sizeof(mdhdr));
+	error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
+	if (error)
+		goto fail;
+
+	/* Dump msgbuf up front. */
+	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
+	    round_page(msgbufp->msg_size));
+	if (error)
+		goto fail;
+
+	/* Dump bitmap. */
+	error = blk_write(di, (char *)vm_page_dump, 0,
+	    round_page(vm_page_dump_size));
+	if (error)
+		goto fail;
+
+	/* Dump kernel page table pages. */
+	for (va = KERNBASE; va < kernel_vm_end; va += NBPDR) {
+		/* We always write a page, even if it is zero. */
+		pte1p = NULL;
+		pte2p = NULL;
+		pmap_get_pt1_pt2(pmap_kernel(), va, &pte1p, &pte2p);
+		if (pte1p && pte1_is_valid(*pte1p) && pte1_is_section(*pte1p)) {
+			if (count) {
+				error = blk_write_cont(di, prev_pa,
+				    count * L2_TABLE_SIZE);
+				if (error)
+					goto fail;
+				count = 0;
+				prev_pa = 0;
+			}
+			/* This is a single 1MB section.  Generate a fake PTP. */
+			pa = (*pte1p & L1_S_ADDR_MASK) | (va & ~L1_S_ADDR_MASK);
+			for (k = 0; k < (L1_S_SIZE / PAGE_SIZE); k++)
+				fakept[k] = PTE2_KERN(pa + (k * PAGE_SIZE),
+				    PTE2_AP_KRW, 0);
+			error = blk_write(di, (char *)&fakept, 0,
+			    L2_TABLE_SIZE);
+			if (error)
+				goto fail;
+			/* Flush, in case we reuse fakept in the same block. */
+			error = blk_flush(di);
+			if (error)
+				goto fail;
+			continue;
+		}
+		if (pte1p && pte1_is_valid(*pte1p) && pte1_is_link(*pte1p)) {
+			pa = *pte1p & L1_C_ADDR_MASK;
+			if (!count) {
+				prev_pa = pa;
+				count++;
+			} else {
+				if (pa == (prev_pa + count * L2_TABLE_SIZE))
+					count++;
+				else {
+					error = blk_write_cont(di, prev_pa,
+					    count * L2_TABLE_SIZE);
+					if (error)
+						goto fail;
+					count = 1;
+					prev_pa = pa;
+				}
+			}
+		} else {
+			if (count) {
+				error = blk_write_cont(di, prev_pa,
+				    count * L2_TABLE_SIZE);
+				if (error)
+					goto fail;
+				count = 0;
+				prev_pa = 0;
+			}
+			bzero(fakept, sizeof(fakept));
+			error = blk_write(di, (char *)&fakept, 0,
+			    L2_TABLE_SIZE);
+			if (error)
+				goto fail;
+			/* Flush, in case we reuse fakept in the same block. */
+			error = blk_flush(di);
+			if (error)
+				goto fail;
+		}
+	}
+
+	if (count) {
+		error = blk_write_cont(di, prev_pa, count * L2_TABLE_SIZE);
+		if (error)
+			goto fail;
+		count = 0;
+		prev_pa = 0;
+	}
+
+	/* Dump memory chunks. */
+	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
+		bits = vm_page_dump[i];
+		while (bits) {
+			bit = ffs(bits) - 1;
+			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
+			    bit) * PAGE_SIZE;
+			if (!count) {
+				prev_pa = pa;
+				count++;
+			} else {
+				if (pa == (prev_pa + count * PAGE_SIZE))
+					count++;
+				else {
+					error = blk_write_cont(di, prev_pa,
+					    count * PAGE_SIZE);
+					if (error)
+						goto fail;
+					count = 1;
+					prev_pa = pa;
+				}
+			}
+			bits &= ~(1ul << bit);
+		}
+	}
+	if (count) {
+		error = blk_write_cont(di, prev_pa, count * PAGE_SIZE);
+		if (error)
+			goto fail;
+		count = 0;
+		prev_pa = 0;
+	}
+
+	/* Dump trailer. */
+	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
+	if (error)
+		goto fail;
+	dumplo += sizeof(kdh);
+
+	/* Signal completion, signoff and exit stage left. */
+	dump_write(di, NULL, 0, 0, 0);
+	printf("\nDump complete\n");
+	return (0);
+
+fail:
+	if (error < 0)
+		error = -error;
+
+	if (error == ECANCELED)
+		printf("\nDump aborted\n");
+	else if (error == ENOSPC)
+		printf("\nDump failed. Partition too small.\n");
+	else
+		printf("\n** DUMP FAILED (ERROR %d) **\n", error);
+
+	return (error);
+}
 #endif
 
 void
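A note on the bitmap bookkeeping in minidumpsys() above: vm_page_dump keeps
one bit per 4kB physical page, 32 pages per uint32_t word, and the loops
above set, test, and invert that encoding. A userland sketch of the
indexing, assuming it matches what dump_add_page() does in this file
(PAGE_SHIFT 12 and the toy array size are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12              /* assumed 4kB pages */
    #define NBBY       8               /* bits per byte */

    static uint32_t vm_page_dump[4];   /* toy bitmap: covers 128 pages */

    /* Set the bit for the page containing pa, dump_add_page() style. */
    static void
    dump_add_page(uint64_t pa)
    {
        uint64_t pgno = pa >> PAGE_SHIFT;

        vm_page_dump[pgno / (sizeof(*vm_page_dump) * NBBY)] |=
            1u << (pgno % (sizeof(*vm_page_dump) * NBBY));
    }

    int
    main(void)
    {
        dump_add_page(0x21000);        /* page 33 -> word 1, bit 1 */
        printf("word1 = 0x%08x\n", vm_page_dump[1]);   /* 0x00000002 */
        return (0);
    }

The dump loops invert this as pa = ((i * sizeof(*vm_page_dump) * NBBY) +
bit) * PAGE_SIZE, with bit recovered via ffs().
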
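A note on the write-out loops in minidumpsys() above: they all share the
same prev_pa/count idiom, batching physically contiguous 4kB pages (or 1kB
L2 tables) into one large write instead of one I/O per object, flushing
whenever the run breaks and once more after the loop. The idiom reduced to
a standalone sketch; emit() is a hypothetical stand-in for blk_write_cont(),
which the patch assumes is defined elsewhere in this file:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u    /* assumed */

    /* Hypothetical stand-in for blk_write_cont(di, pa, size). */
    static void
    emit(uint32_t pa, uint32_t size)
    {
        printf("write %u bytes at 0x%08x\n", size, pa);
    }

    int
    main(void)
    {
        /* Page-aligned addresses in ascending order, with one gap. */
        uint32_t pages[] = { 0x1000, 0x2000, 0x3000, 0x8000, 0x9000 };
        uint32_t prev_pa = 0, count = 0;

        for (int i = 0; i < 5; i++) {
            uint32_t pa = pages[i];

            if (count == 0) {
                prev_pa = pa;      /* start a new run */
                count = 1;
            } else if (pa == prev_pa + count * PAGE_SIZE) {
                count++;           /* pa extends the current run */
            } else {
                emit(prev_pa, count * PAGE_SIZE);  /* flush broken run */
                prev_pa = pa;
                count = 1;
            }
        }
        if (count)
            emit(prev_pa, count * PAGE_SIZE);      /* flush final run */
        return (0);
    }

For the addresses above this prints a single 12288-byte write followed by
an 8192-byte write.
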
Index: sys/arm/arm/pmap-v6-new.c
===================================================================
--- sys/arm/arm/pmap-v6-new.c
+++ sys/arm/arm/pmap-v6-new.c
@@ -3838,6 +3838,47 @@
 }
 
 /*
+ * Fetch pointers to the PTE1/PTE2 for the given pmap/VA pair.
+ * Returns TRUE if the mapping exists, else FALSE.
+ *
+ * NOTE: This function is only used by a couple of arm-specific modules.
+ * It is not safe to take any pmap locks here, since we could be right
+ * in the middle of debugging the pmap anyway...
+ *
+ * It is possible for this routine to return FALSE even though a valid
+ * mapping does exist.  This is because we don't lock, so the metadata
+ * state may be inconsistent.
+ *
+ * NOTE: We can return a NULL *pte2p in the case where the L1 *pte1p is
+ * a "section" mapping.
+ */
+boolean_t
+pmap_get_pt1_pt2(pmap_t pmap, vm_offset_t va, pt1_entry_t **pte1p,
+    pt2_entry_t **pte2p)
+{
+
+	if (pmap->pm_pt1 == NULL)
+		return (FALSE);
+
+	*pte1p = pmap_pte1(pmap, va);
+
+	if (!pte1_is_valid(pte1_load(*pte1p)))
+		return (FALSE);
+
+	if (pte1_is_section(pte1_load(*pte1p))) {
+		*pte2p = NULL;
+		return (TRUE);
+	}
+
+	if (pmap->pm_pt2tab == NULL)
+		return (FALSE);
+
+	*pte2p = pmap_pt2tab_entry(pmap, va);
+
+	return (TRUE);
+}
+
+/*
 * Do the things to unmap a page in a process.
 */
 static int
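The calling convention of pmap_get_pt1_pt2() above: on TRUE, *pte1p always
points at the PTE1, while *pte2p is NULL for a 1MB section and points at a
PTE2 only for a link entry, so callers such as minidumpsys() must check it
before dereferencing. A userland model of that contract; every type, bit
value, and helper below is an illustrative stand-in, not the kernel
definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Userland model of the pmap-v6 contract; types and bits assumed. */
    typedef uint32_t pt1_entry_t;
    typedef uint32_t pt2_entry_t;

    #define PTE1_V       0x01u  /* "valid" bit, illustrative only */
    #define PTE1_SECTION 0x02u  /* "section" bit, illustrative only */

    static int pte1_is_valid(pt1_entry_t p)   { return ((p & PTE1_V) != 0); }
    static int pte1_is_section(pt1_entry_t p) { return ((p & PTE1_SECTION) != 0); }

    /* Modeled lookup: fills *pte1p always, *pte2p only for link entries. */
    static int
    get_pt1_pt2(pt1_entry_t *pt1, pt2_entry_t *pt2, unsigned idx,
        pt1_entry_t **pte1p, pt2_entry_t **pte2p)
    {
        *pte1p = &pt1[idx];
        if (!pte1_is_valid(**pte1p))
            return (0);              /* no mapping */
        if (pte1_is_section(**pte1p)) {
            *pte2p = NULL;           /* 1MB section: no L2 table */
            return (1);
        }
        *pte2p = &pt2[idx];          /* link: hand back the PTE2 */
        return (1);
    }

    int
    main(void)
    {
        pt1_entry_t pt1[2] = { PTE1_V | PTE1_SECTION, PTE1_V };
        pt2_entry_t pt2[2] = { 0, 0xdead };
        pt1_entry_t *p1;
        pt2_entry_t *p2;

        /* Callers must handle *pte2p == NULL before dereferencing it. */
        if (get_pt1_pt2(pt1, pt2, 0, &p1, &p2))
            printf("idx 0: section, pte2p %s\n", p2 ? "set" : "NULL");
        if (get_pt1_pt2(pt1, pt2, 1, &p1, &p2))
            printf("idx 1: link, *pte2p = 0x%x\n", *p2);
        return (0);
    }
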
Index: sys/arm/include/pmap-v6.h
===================================================================
--- sys/arm/include/pmap-v6.h
+++ sys/arm/include/pmap-v6.h
@@ -237,6 +237,8 @@
 };
 
 void pmap_devmap_bootstrap(const struct pmap_devmap *);
+boolean_t pmap_get_pt1_pt2(pmap_t pmap, vm_offset_t va, pt1_entry_t **pte1p,
+    pt2_entry_t **pte2p);
 
 #endif /* _KERNEL */