Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -8356,10 +8356,12 @@
 }
 
 /*
- * perform the pmap work for mincore
+ * Perform the pmap work for mincore(2).  If the page is not both referenced and
+ * modified by this pmap, returns its physical address so that the caller can
+ * find other mappings.
  */
 int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 {
 	pd_entry_t *pdep;
 	pt_entry_t pte, PG_A, PG_M, PG_RW, PG_V;
@@ -8372,7 +8374,6 @@
 	PG_RW = pmap_rw_bit(pmap);
 
 	PMAP_LOCK(pmap);
-retry:
 	pdep = pmap_pde(pmap, addr);
 	if (pdep != NULL && (*pdep & PG_V)) {
 		if (*pdep & PG_PS) {
@@ -8401,11 +8402,10 @@
 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
 	    (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
-		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
-		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
-			goto retry;
-	} else
-		PA_UNLOCK_COND(*locked_pa);
+		*pap = pa;
+	} else {
+		*pap = 0;
+	}
 	PMAP_UNLOCK(pmap);
 	return (val);
 }
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c
+++ sys/kern/kern_proc.c
@@ -2363,7 +2363,7 @@
 	vm_object_t obj, tobj;
 	vm_page_t m, m_adv;
 	vm_offset_t addr;
-	vm_paddr_t locked_pa;
+	vm_paddr_t pa;
 	vm_pindex_t pi, pi_adv, pindex;
 
 	*super = false;
@@ -2371,7 +2371,7 @@
 	if (vmmap_skip_res_cnt)
 		return;
 
-	locked_pa = 0;
+	pa = 0;
 	obj = entry->object.vm_object;
 	addr = entry->start;
 	m_adv = NULL;
@@ -2401,8 +2401,7 @@
 		m_adv = NULL;
 		if (m->psind != 0 && addr + pagesizes[1] <= entry->end &&
 		    (addr & (pagesizes[1] - 1)) == 0 &&
-		    (pmap_mincore(map->pmap, addr, &locked_pa) &
-		    MINCORE_SUPER) != 0) {
+		    (pmap_mincore(map->pmap, addr, &pa) & MINCORE_SUPER) != 0) {
			*super = true;
			pi_adv = atop(pagesizes[1]);
		} else {
@@ -2418,7 +2417,6 @@
 		*resident_count += pi_adv;
 next:;
 	}
-	PA_UNLOCK_COND(locked_pa);
 }
 
 /*
Index: sys/vm/vm_mmap.c
===================================================================
--- sys/vm/vm_mmap.c
+++ sys/vm/vm_mmap.c
@@ -779,21 +779,16 @@
 int
 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
 {
-	vm_offset_t addr, first_addr;
-	vm_offset_t end, cend;
 	pmap_t pmap;
 	vm_map_t map;
-	int error = 0;
-	int vecindex, lastvecindex;
-	vm_map_entry_t current;
-	vm_map_entry_t entry;
+	vm_map_entry_t current, entry;
 	vm_object_t object;
-	vm_paddr_t locked_pa;
+	vm_offset_t addr, cend, end, first_addr;
+	vm_paddr_t pa;
 	vm_page_t m;
 	vm_pindex_t pindex;
-	int mincoreinfo;
+	int error, lastvecindex, mincoreinfo, vecindex;
 	unsigned int timestamp;
-	boolean_t locked;
 
 	/*
	 * Make sure that the addresses presented are valid for user
@@ -836,7 +831,7 @@
 		 * ignore submaps (for now) or null objects
 		 */
 		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
-			current->object.vm_object == NULL)
+		    current->object.vm_object == NULL)
 			continue;
 
 		/*
@@ -849,50 +844,44 @@
 		if (cend > end)
 			cend = end;
 
-		/*
-		 * scan this entry one page at a time
-		 */
-		while (addr < cend) {
+		for (; addr < cend; addr += PAGE_SIZE) {
 			/*
 			 * Check pmap first, it is likely faster, also
 			 * it can provide info as to whether we are the
 			 * one referencing or modifying the page.
 			 */
-			object = NULL;
-			locked_pa = 0;
-		retry:
 			m = NULL;
-			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
+			object = NULL;
+retry:
+			mincoreinfo = pmap_mincore(pmap, addr, &pa);
 			if (mincore_mapped) {
 				/*
				 * We only care about this pmap's
				 * mapping of the page, if any.
				 */
-				if (locked_pa != 0) {
-					vm_page_unlock(PHYS_TO_VM_PAGE(
-					    locked_pa));
-				}
-			} else if (locked_pa != 0) {
+				;
+			} else if (pa != 0) {
 				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
-				 * other mappings might be examined.
+				 * other mappings might be examined.  The page's
+				 * identity may change at any point before its
+				 * object lock is acquired, so re-validate if
+				 * necessary.
				 */
-				m = PHYS_TO_VM_PAGE(locked_pa);
-				if (m->object != object) {
+				m = PHYS_TO_VM_PAGE(pa);
+				while (object == NULL || m->object != object) {
 					if (object != NULL)
 						VM_OBJECT_WUNLOCK(object);
-					object = m->object;
-					locked = VM_OBJECT_TRYWLOCK(object);
-					vm_page_unlock(m);
-					if (!locked) {
-						VM_OBJECT_WLOCK(object);
-						vm_page_lock(m);
+					object = (vm_object_t)atomic_load_ptr(
+					    &m->object);
+					if (object == NULL)
 						goto retry;
-					}
-				} else
-					vm_page_unlock(m);
+					VM_OBJECT_WLOCK(object);
+				}
+				if (pa != pmap_extract(pmap, addr))
+					goto retry;
 				KASSERT(m->valid == VM_PAGE_BITS_ALL,
 				    ("mincore: page %p is mapped but invalid",
 				    m));
@@ -922,11 +911,14 @@
 				}
 			}
 			if (m != NULL) {
-				/* Examine other mappings to the page. */
+				VM_OBJECT_ASSERT_WLOCKED(m->object);
+
+				/* Examine other mappings of the page. */
 				if (m->dirty == 0 && pmap_is_modified(m))
 					vm_page_dirty(m);
 				if (m->dirty != 0)
 					mincoreinfo |= MINCORE_MODIFIED_OTHER;
+
 				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
@@ -985,7 +977,6 @@
 				goto RestartScan;
 
 			lastvecindex = vecindex;
-			addr += PAGE_SIZE;
 		}
 	}
 
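A note on the synchronization pattern this change introduces.  The old code
held the page lock across pmap_mincore() and used vm_page_pa_tryrelock() to
relock when the physical address changed.  The new code instead has
pmap_mincore() report the physical address through *pap with only the pmap
lock held, and kern_mincore() revalidates after the fact: it loads m->object
without any lock, acquires that object's lock, re-checks that m->object still
names the locked object, and finally re-checks the mapping itself with
pmap_extract() before trusting pa.  The sketch below is a minimal userland
model of that load/lock/re-check loop, written in standalone C11 so the idiom
can be compiled and tested outside the kernel.  The struct layouts and the
page_lock_object() helper are illustrative stand-ins, not kernel interfaces.

/*
 * Userland model of the object-lock revalidation loop that replaces
 * vm_page_pa_tryrelock() in kern_mincore().  Illustrative names only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct object {
	pthread_mutex_t	lock;
	int		id;
};

struct page {
	/* May be changed by other threads until its object is locked. */
	_Atomic(struct object *) object;
};

/*
 * Lock the object that "m" belongs to.  Returns NULL if the page lost its
 * identity, in which case the caller must restart from scratch (the
 * kernel's "goto retry").
 */
static struct object *
page_lock_object(struct page *m)
{
	struct object *object;

	object = NULL;
	while (object == NULL || atomic_load(&m->object) != object) {
		if (object != NULL)
			pthread_mutex_unlock(&object->lock);
		object = atomic_load(&m->object);
		if (object == NULL)
			return (NULL);
		pthread_mutex_lock(&object->lock);
		/* m->object may have changed while we slept; re-check. */
	}
	return (object);
}

int
main(void)
{
	struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .id = 1 };
	struct page m;
	struct object *locked;

	atomic_init(&m.object, &obj);
	locked = page_lock_object(&m);
	if (locked != NULL) {
		printf("locked object %d\n", locked->id);
		pthread_mutex_unlock(&locked->lock);
	}
	return (0);
}

Compile with "cc -pthread".  The key property mirrors the kernel loop: a
pointer read without any lock is trusted only after it is observed unchanged
while the lock it names is held.  A NULL object means the page changed
identity mid-lookup; kern_mincore() additionally re-checks the physical
address against pmap_extract() to catch the mapping itself being replaced in
the same window.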