diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4713,8 +4713,8 @@
 	*pml5 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 	if (pmap->pm_pmltopu != NULL && pml5index < NUPML5E) {
-		if (pmap->pm_ucr3 != PMAP_NO_CR3)
-			*pml5 |= pg_nx;
+		MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
+		*pml5 |= pg_nx;
 
 		pml5u = &pmap->pm_pmltopu[pml5index];
 		*pml5u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
@@ -4734,6 +4734,8 @@
 	if (!pmap_is_la57(pmap) && pmap->pm_pmltopu != NULL &&
 	    pml4index < NUPML4E) {
+		MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
+
 		/*
 		 * PTI: Make all user-space mappings in the
 		 * kernel-mode page table no-execute so that
@@ -4741,8 +4743,7 @@
 		 * the kernel-mode page table active on return
 		 * to user space.
 		 */
-		if (pmap->pm_ucr3 != PMAP_NO_CR3)
-			*pml4 |= pg_nx;
+		*pml4 |= pg_nx;
 
 		pml4u = &pmap->pm_pmltopu[pml4index];
 		*pml4u = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V |
@@ -4767,8 +4768,8 @@
 	}
 	if ((*pdp & PG_V) == 0) {
 		/* Have to allocate a new pd, recurse */
-		if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
-		    lockp, va) == NULL) {
+		if (pmap_allocpte_nosleep(pmap, pmap_pdpe_pindex(va),
+		    lockp, va) == NULL) {
 			pmap_allocpte_free_unref(pmap, va,
 			    pmap_pml4e(pmap, va));
 			pmap_free_pt_page(pmap, m, true);
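
For context: the patch replaces the runtime check "if (pmap->pm_ucr3 != PMAP_NO_CR3)" with an MPASS() assertion and applies pg_nx unconditionally. The assertion encodes the invariant that whenever a pmap carries a PTI user-mode top-level page table page (pm_pmltopu != NULL), its user %cr3 value must already be valid, so the old conditional could never be false on this path. Below is a minimal userspace sketch, not kernel code, of that invariant. The struct layout, the PMAP_NO_CR3 sentinel value, the PG_NX constant, and the assert()-based MPASS stand-in are simplifications for illustration; the kernel's real MPASS is only compiled in under INVARIANTS.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define	PMAP_NO_CR3	((uint64_t)-1)	/* sentinel: no user %cr3 (placeholder value) */
	#define	MPASS(ex)	assert(ex)	/* stand-in: kernel MPASS is a no-op without INVARIANTS */
	#define	PG_NX		(1ULL << 63)	/* no-execute bit of an x86-64 page-table entry */

	struct pmap {
		uint64_t	*pm_pmltopu;	/* user-mode top-level table, or NULL */
		uint64_t	 pm_ucr3;	/* user %cr3, or PMAP_NO_CR3 */
	};

	/*
	 * Mirrors the patched logic: if a user-mode top-level table exists,
	 * assert the invariant and set NX unconditionally.
	 */
	static void
	set_user_nx(struct pmap *pmap, uint64_t *pte)
	{
		if (pmap->pm_pmltopu != NULL) {
			MPASS(pmap->pm_ucr3 != PMAP_NO_CR3);
			*pte |= PG_NX;
		}
	}

	int
	main(void)
	{
		uint64_t pte = 0x1000 | 0x7;	/* fake PTE: frame | PG_U|PG_RW|PG_V */
		uint64_t dummy_table[1] = { 0 };
		struct pmap p = { .pm_pmltopu = dummy_table, .pm_ucr3 = 0x2000 };

		set_user_nx(&p, &pte);
		printf("NX set: %s\n", (pte & PG_NX) ? "yes" : "no");
		return (0);
	}

One practical consequence worth noting: on kernels built without INVARIANTS, MPASS compiles away entirely, so the observable effect of the patch there is that pg_nx is now set unconditionally whenever pm_pmltopu is present, rather than being silently skipped if pm_ucr3 were ever unset.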