Index: sys/amd64/amd64/minidump_machdep.c
===================================================================
--- sys/amd64/amd64/minidump_machdep.c
+++ sys/amd64/amd64/minidump_machdep.c
@@ -301,6 +301,8 @@
 		}
 	}
 
+	pmap_dump_add_pages();
+
 	/* Calculate dump size. */
 	dumpsize = pmapsize;
 	dumpsize += round_page(msgbufp->msg_size);
@@ -430,10 +432,14 @@
 	if (error != 0)
 		goto fail;
 
+	pmap_dump_drop_pages();
+
 	printf("\nDump complete\n");
 	return (0);
 
 fail:
+	pmap_dump_drop_pages();
+
 	if (error < 0)
 		error = -error;
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -4176,7 +4176,6 @@
 			PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 			/* Entire chunk is free; return it. */
 			m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
-			dump_drop_page(m_pc->phys_addr);
 			mtx_lock(&pv_chunks_mutex);
 			TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 			break;
@@ -4260,7 +4259,6 @@
 	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 	/* entire chunk is free, return it */
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
-	dump_drop_page(m->phys_addr);
 	vm_page_unwire_noq(m);
 	vm_page_free(m);
 }
@@ -4350,7 +4348,6 @@
 	}
 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
-	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
 	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
@@ -4447,7 +4444,6 @@
 	}
 	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
-	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
 	pc->pc_map[0] = PC_FREE0;
@@ -10288,6 +10284,44 @@
     NULL, 0, sysctl_kmaps, "A",
     "Dump kernel address layout");
 
+void
+pmap_dump_add_pages(void)
+{
+	struct pv_chunk *pc;
+	vm_page_t m_pc;
+
+	mtx_lock(&pv_chunks_mutex);
+	TAILQ_FOREACH(pc, &pv_chunks, pc_lru) {
+		/*
+		 * Skip markers.
+		 */
+		if (pc->pc_pmap == NULL)
+			continue;
+		m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+		dump_add_page(m_pc->phys_addr);
+	}
+	mtx_unlock(&pv_chunks_mutex);
+}
+
+void
+pmap_dump_drop_pages(void)
+{
+	struct pv_chunk *pc;
+	vm_page_t m_pc;
+
+	mtx_lock(&pv_chunks_mutex);
+	TAILQ_FOREACH(pc, &pv_chunks, pc_lru) {
+		/*
+		 * Skip markers.
+		 */
+		if (pc->pc_pmap == NULL)
+			continue;
+		m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+		dump_drop_page(m_pc->phys_addr);
+	}
+	mtx_unlock(&pv_chunks_mutex);
+}
+
 #ifdef DDB
 DB_SHOW_COMMAND(pte, pmap_print_pte)
 {
Index: sys/amd64/include/pmap.h
===================================================================
--- sys/amd64/include/pmap.h
+++ sys/amd64/include/pmap.h
@@ -468,6 +468,9 @@
 void	pmap_thread_init_invl_gen(struct thread *td);
 int	pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
 void	pmap_page_array_startup(long count);
+
+void	pmap_dump_add_pages(void);
+void	pmap_dump_drop_pages(void);
 #endif /* _KERNEL */
 
 /* Return various clipped indexes for a given VA */