diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -7545,8 +7545,9 @@
 	 * resident, we are creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
+		pdp_entry_t *pdpe;
+		pd_entry_t *pde;
 		vm_pindex_t ptepindex;
-		pd_entry_t *ptepa;
 
 		/*
 		 * Calculate pagetable page index
@@ -7555,31 +7556,35 @@
 		if (mpte && (mpte->pindex == ptepindex)) {
 			mpte->ref_count++;
 		} else {
-			/*
-			 * Get the page directory entry
-			 */
-			ptepa = pmap_pde(pmap, va);
-
 			/*
 			 * If the page table page is mapped, we just increment
 			 * the hold count, and activate it.  Otherwise, we
-			 * attempt to allocate a page table page.  If this
-			 * attempt fails, we don't retry.  Instead, we give up.
+			 * attempt to allocate a page table page, passing NULL
+			 * instead of the PV list lock pointer because we don't
+			 * intend to sleep.  If this attempt fails, we don't
+			 * retry.  Instead, we give up.
 			 */
-			if (ptepa && (*ptepa & PG_V) != 0) {
-				if (*ptepa & PG_PS)
+			pdpe = pmap_pdpe(pmap, va);
+			if (pdpe != NULL && (*pdpe & PG_V) != 0) {
+				if ((*pdpe & PG_PS) != 0)
 					return (NULL);
-				mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
-				mpte->ref_count++;
+				pde = pmap_pdpe_to_pde(pdpe, va);
+				if ((*pde & PG_V) != 0) {
+					if ((*pde & PG_PS) != 0)
+						return (NULL);
+					mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+					mpte->ref_count++;
+				} else {
+					mpte = pmap_allocpte_alloc(pmap,
+					    ptepindex, NULL, va);
+					if (mpte == NULL)
+						return (NULL);
+				}
 			} else {
-				/*
-				 * Pass NULL instead of the PV list lock
-				 * pointer, because we don't intend to sleep.
-				 */
 				mpte = pmap_allocpte_alloc(pmap, ptepindex,
 				    NULL, va);
 				if (mpte == NULL)
-					return (mpte);
+					return (NULL);
 			}
 		}
 		pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
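
For review context, a minimal userspace sketch of the control flow this diff
introduces: the PDPE is consulted before the PDE, so a 1GB superpage (PG_PS
set at the PDP level) makes the function give up before it tries to descend
into a page directory that does not exist, and a missing or 2MB-superpage PDE
is handled one level down.  The types, the PG_V/PG_PS values, and the
classify() helper below are simplified stand-ins for illustration, not the
kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define	PG_V	0x001ULL	/* entry is valid (simplified stand-in) */
#define	PG_PS	0x080ULL	/* entry maps a superpage (simplified) */

typedef uint64_t pdp_entry_t;
typedef uint64_t pd_entry_t;

enum walk_result {
	WALK_GIVE_UP,	/* superpage covers va; caller returns NULL */
	WALK_ALLOCATE,	/* no pt page; caller calls pmap_allocpte_alloc() */
	WALK_HAVE_PAGE	/* pt page exists; caller bumps its ref_count */
};

/*
 * Mirrors the diff's logic.  'pdpe' may be NULL when no PDP page is
 * mapped for the address; 'pde' is dereferenced only after the PDPE is
 * known to be a valid non-superpage entry, just as pmap_pdpe_to_pde()
 * is only reached on that path in the diff.
 */
static enum walk_result
classify(pdp_entry_t *pdpe, pd_entry_t *pde)
{
	if (pdpe == NULL || (*pdpe & PG_V) == 0)
		return (WALK_ALLOCATE);	/* outer else in the diff */
	if ((*pdpe & PG_PS) != 0)
		return (WALK_GIVE_UP);	/* 1GB page: no PD below it */
	if ((*pde & PG_V) == 0)
		return (WALK_ALLOCATE);	/* inner else in the diff */
	if ((*pde & PG_PS) != 0)
		return (WALK_GIVE_UP);	/* 2MB page: no PT below it */
	return (WALK_HAVE_PAGE);
}

int
main(void)
{
	pdp_entry_t pdpe_1g = PG_V | PG_PS, pdpe_plain = PG_V;
	pd_entry_t pde_2m = PG_V | PG_PS, pde_plain = PG_V, pde_empty = 0;

	/* The PDE argument is never examined on the 1GB path. */
	printf("1GB PDPE  -> %d\n", classify(&pdpe_1g, NULL));
	printf("2MB PDE   -> %d\n", classify(&pdpe_plain, &pde_2m));
	printf("empty PDE -> %d\n", classify(&pdpe_plain, &pde_empty));
	printf("pt page   -> %d\n", classify(&pdpe_plain, &pde_plain));
	return (0);
}

Note the ordering this sketch preserves: the old code's pmap_pde() walk would
dereference a PDE even when the address was covered by a 1GB mapping, whereas
checking PG_PS at the PDP level first makes the descent to the PDE safe.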