Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -358,6 +358,13 @@
 #define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
 
+#define	PV_LIST_LOCK_MATCHES(lockp, pa)	({		\
+	(PHYS_TO_PV_LIST_LOCK(pa) == (*lockp));		\
+})
+
+#define	PV_LIST_LOCK_MATCHES_VM_PAGE(lockp, m)		\
+	PV_LIST_LOCK_MATCHES(lockp, VM_PAGE_TO_PHYS(m))
+
 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
 	struct rwlock **_lockp = (lockp);		\
 							\
@@ -1149,7 +1156,7 @@
 static void	free_pv_chunk(struct pv_chunk *pc);
 static void	free_pv_chunk_batch(struct pv_chunklist *batch);
 static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
-static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
+static pv_entry_t get_pv_entry(pmap_t pmap, bool reclaim, struct rwlock **lockp);
 static int	popcnt_pc_map_pq(uint64_t *map);
 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
 static void	reserve_pv_entries(pmap_t pmap, int needed,
@@ -4160,7 +4167,6 @@
 	static int active_reclaims = 0;
 
 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
-	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
 	pmap = NULL;
 	m_pc = NULL;
 	PG_G = PG_A = PG_M = PG_RW = 0;
@@ -4420,7 +4426,7 @@
  * The given PV list lock may be released.
  */
static pv_entry_t
-get_pv_entry(pmap_t pmap, struct rwlock **lockp)
+get_pv_entry(pmap_t pmap, bool reclaim, struct rwlock **lockp)
 {
 	int bit, field;
 	pv_entry_t pv;
@@ -4457,7 +4463,7 @@
 	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED);
 	if (m == NULL) {
-		if (lockp == NULL) {
+		if (!reclaim) {
 			PV_STAT(pc_chunk_tryfail++);
 			return (NULL);
 		}
@@ -4747,8 +4753,9 @@
 	pv_entry_t pv;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	/* Pass NULL instead of the lock pointer to disable reclamation. */
-	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
+	if (!PV_LIST_LOCK_MATCHES_VM_PAGE(lockp, m))
+		RELEASE_PV_LIST_LOCK(lockp);
+	if ((pv = get_pv_entry(pmap, false, NULL)) != NULL) {
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
@@ -4772,12 +4779,13 @@
 	vm_paddr_t pa;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	/* Pass NULL instead of the lock pointer to disable reclamation. */
-	if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
-	    NULL : lockp)) == NULL)
+	pa = pde & PG_PS_FRAME;
+	if (!PV_LIST_LOCK_MATCHES(lockp, pa))
+		RELEASE_PV_LIST_LOCK(lockp);
+	pv = get_pv_entry(pmap, !(flags & PMAP_ENTER_NORECLAIM), lockp);
+	if (pv == NULL)
 		return (false);
 	pv->pv_va = va;
-	pa = pde & PG_PS_FRAME;
 	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
 	pvh = pa_to_pvh(pa);
 	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
@@ -5978,7 +5986,9 @@
 	 */
 	if ((newpte & PG_MANAGED) != 0) {
 		if (pv == NULL) {
-			pv = get_pv_entry(pmap, &lock);
+			if (!PV_LIST_LOCK_MATCHES_VM_PAGE(&lock, m))
+				RELEASE_PV_LIST_LOCK(&lock);
+			pv = get_pv_entry(pmap, true, &lock);
 			pv->pv_va = va;
 		}
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
@@ -6171,6 +6181,7 @@
 	 * Abort this mapping if its PV entry could not be created.
 	 */
 	if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
+		RELEASE_PV_LIST_LOCK(lockp);
 		SLIST_INIT(&free);
 		if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
 			/*
@@ -6350,6 +6361,7 @@
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
+		RELEASE_PV_LIST_LOCK(lockp);
 		if (mpte != NULL) {
 			SLIST_INIT(&free);
 			if (pmap_unwire_ptp(pmap, va, mpte, &free)) {