Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -436,7 +436,7 @@
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM/pmap parameters");
 
-static int pg_ps_enabled = 1;
+static int __read_frequently pg_ps_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &pg_ps_enabled, 0, "Are large page mappings enabled?");
 
@@ -1318,7 +1318,7 @@
 static vm_page_t pmap_large_map_getptp_unlocked(void);
 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
 #if VM_NRESERVLEVEL > 0
-static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     vm_page_t mpte, struct rwlock **lockp);
 #endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
@@ -6856,7 +6856,7 @@
  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
  * identical characteristics.
  */
-static void
+static bool
 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
     struct rwlock **lockp)
 {
@@ -6865,6 +6865,10 @@
 	pt_entry_t allpte_PG_A, PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
 	int PG_PTE_CACHE;
 
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	if (!pmap_ps_enabled(pmap))
+		return (false);
+
 	PG_A = pmap_accessed_bit(pmap);
 	PG_G = pmap_global_bit(pmap);
 	PG_M = pmap_modified_bit(pmap);
@@ -6873,8 +6877,6 @@
 	PG_PKU_MASK = pmap_pku_mask_bit(pmap);
 	PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
 
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
 	/*
 	 * Examine the first PTE in the specified PTP. Abort if this PTE is
 	 * ineligible for promotion due to hardware errata, invalid, or does
@@ -6883,12 +6885,12 @@
 	firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
 	newpde = *firstpte;
 	if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap, newpde)))
-		return;
+		return (false);
 	if ((newpde & ((PG_FRAME & PDRMASK) | PG_V)) != PG_V) {
 		counter_u64_add(pmap_pde_p_failures, 1);
 		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
-		return;
+		return (false);
 	}
 
 	/*
@@ -6933,7 +6935,7 @@
 			counter_u64_add(pmap_pde_p_failures, 1);
 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 			    " in pmap %p", va, pmap);
-			return;
+			return (false);
 		}
 setpte:
 		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
@@ -6952,7 +6954,7 @@
 			counter_u64_add(pmap_pde_p_failures, 1);
 			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 			    " in pmap %p", va, pmap);
-			return;
+			return (false);
 		}
 		allpte_PG_A &= oldpte;
 		pa -= PAGE_SIZE;
@@ -6993,7 +6995,7 @@
 		CTR2(KTR_PMAP,
 		    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
 		    pmap);
-		return;
+		return (false);
 	}
 
 	/*
@@ -7018,6 +7020,7 @@
 	counter_u64_add(pmap_pde_promotions, 1);
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 	    " in pmap %p", va, pmap);
+	return (true);
 }
 #endif /* VM_NRESERVLEVEL > 0 */
 
@@ -7391,10 +7394,9 @@
 	 * populated, then attempt promotion.
 	 */
 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-	    pmap_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
-		pmap_promote_pde(pmap, pde, va, mpte, &lock);
+		(void)pmap_promote_pde(pmap, pde, va, mpte, &lock);
 #endif
 
 	rv = KERN_SUCCESS;
@@ -7782,18 +7784,17 @@
 	 * attempt promotion.
 	 */
 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-	    pmap_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (pde == NULL)
 			pde = pmap_pde(pmap, va);
-		pmap_promote_pde(pmap, pde, va, mpte, lockp);
 
 		/*
 		 * If promotion succeeds, then the next call to this function
 		 * should not be given the unmapped PTP as a hint.
 		 */
-		mpte = NULL;
+		if (pmap_promote_pde(pmap, pde, va, mpte, lockp))
+			mpte = NULL;
 	}
 #endif
 
@@ -10359,10 +10360,9 @@
 	m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
 
 	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-	    pmap_ps_enabled(pmap) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
-	    vm_reserv_level_iffullpop(m) == 0) {
-		pmap_promote_pde(pmap, pde, va, mpte, &lock);
+	    vm_reserv_level_iffullpop(m) == 0 &&
+	    pmap_promote_pde(pmap, pde, va, mpte, &lock)) {
 #ifdef INVARIANTS
 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
 #endif
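The net effect of the hunks above: pmap_promote_pde() now performs the pmap_ps_enabled() check itself and reports whether the promotion happened, so callers such as pmap_enter_quick_locked() only discard their cached page-table-page hint when a promotion actually occurred. Below is a minimal stand-alone C sketch of that caller-side pattern; the names try_promote(), feature_enabled, fully_populated, and hint are hypothetical stand-ins that mirror the control flow only and are not kernel interfaces.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Stand-in for pmap_promote_pde() after the patch: the feature check
 * (cf. pmap_ps_enabled()) lives inside the attempt function, and the
 * function reports success instead of returning void.
 */
static bool
try_promote(bool feature_enabled, bool fully_populated)
{
	if (!feature_enabled)
		return (false);
	if (!fully_populated)
		return (false);
	/* ... the actual promotion work would happen here ... */
	return (true);
}

int
main(void)
{
	int dummy;
	void *hint = &dummy;	/* stand-in for the mpte hint */

	/*
	 * Caller-side pattern from the pmap_enter_quick_locked() hunk:
	 * drop the cached hint only if the promotion actually succeeded.
	 */
	if (try_promote(true, true))
		hint = NULL;

	printf("hint %s\n", hint == NULL ? "cleared" : "kept");
	return (0);
}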