Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -7147,7 +7147,7 @@
 	 */
 	if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
 		pmap_abort_ptp(pmap, va, pdpg);
-		return (KERN_FAILURE);
+		return (KERN_PROTECTION_FAILURE);
 	}
 	if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
 		newpde &= ~X86_PG_PKU_MASK;
Index: sys/mips/include/pmap.h
===================================================================
--- sys/mips/include/pmap.h
+++ sys/mips/include/pmap.h
@@ -190,6 +190,12 @@
 	return (0);
 }
 
+static inline bool
+pmap_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 #endif /* _KERNEL */
 
 #endif /* !LOCORE */
Index: sys/powerpc/aim/mmu_oea.c
===================================================================
--- sys/powerpc/aim/mmu_oea.c
+++ sys/powerpc/aim/mmu_oea.c
@@ -327,6 +327,7 @@
 vm_offset_t moea_quick_enter_page(vm_page_t m);
 void moea_quick_remove_page(vm_offset_t addr);
 boolean_t moea_page_is_mapped(vm_page_t m);
+bool moea_ps_enabled(pmap_t pmap);
 static int moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen);
 static int moea_decode_kernel_ptr(vm_offset_t addr,
@@ -370,6 +371,7 @@
 	.quick_enter_page = moea_quick_enter_page,
 	.quick_remove_page = moea_quick_remove_page,
 	.page_is_mapped = moea_page_is_mapped,
+	.ps_enabled = moea_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap = moea_bootstrap,
@@ -1122,6 +1124,12 @@
 	return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 }
 
+bool
+moea_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested. If specified the page
Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c
+++ sys/powerpc/booke/pmap.c
@@ -354,6 +354,7 @@
     int *is_user, vm_offset_t *decoded_addr);
 static void mmu_booke_page_array_startup(long);
 static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
+static bool mmu_booke_ps_enabled(pmap_t pmap);
 
 static struct pmap_funcs mmu_booke_methods = {
 	/* pmap dispatcher interface */
@@ -396,6 +397,7 @@
 	.quick_remove_page = mmu_booke_quick_remove_page,
 	.page_array_startup = mmu_booke_page_array_startup,
 	.page_is_mapped = mmu_booke_page_is_mapped,
+	.ps_enabled = mmu_booke_ps_enabled,
 
 	/* Internal interfaces */
 	.bootstrap = mmu_booke_bootstrap,
@@ -1226,6 +1228,12 @@
 	return (!TAILQ_EMPTY(&(m)->md.pv_list));
 }
 
+static bool
+mmu_booke_ps_enabled(pmap_t pmap __unused)
+{
+	return (false);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -540,17 +540,13 @@
 	    pidx <= pager_last;
 	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
 		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
-#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
-    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv) || \
-    defined(__powerpc64__)
+
 		psind = m->psind;
 		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
 		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
 		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
 			psind = 0;
-#else
-		psind = 0;
-#endif
+
 		npages = atop(pagesizes[psind]);
 		for (i = 0; i < npages; i++) {
 			vm_fault_populate_check_page(&m[i]);
@@ -559,8 +555,18 @@
 		VM_OBJECT_WUNLOCK(fs->first_object);
 		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
 		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
-#if defined(__amd64__)
-		if (psind > 0 && rv == KERN_FAILURE) {
+
+		/*
+		 * pmap_enter() may fail for a superpage mapping if additional
+		 * protection policies prevent the full mapping.
+		 * For example, this will happen on amd64 if the entire
+		 * address range does not share the same userspace protection
+		 * key. Revert to single-page mappings if this happens.
+		 */
+		MPASS(rv == KERN_SUCCESS ||
+		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
+		if (__predict_false(psind > 0 &&
+		    rv == KERN_PROTECTION_FAILURE)) {
			for (i = 0; i < npages; i++) {
 				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
 				    &m[i], fs->prot, fs->fault_type |
@@ -568,9 +574,7 @@
 				MPASS(rv == KERN_SUCCESS);
 			}
 		}
-#else
-		MPASS(rv == KERN_SUCCESS);
-#endif
+
 		VM_OBJECT_WLOCK(fs->first_object);
 		for (i = 0; i < npages; i++) {
 			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
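
For reference, the control flow this patch adds to vm_fault_populate() can be exercised outside the kernel. The following is a minimal standalone C sketch, not kernel code: fake_pmap_enter(), pkey_of(), BASE_PAGE_SIZE, and SUPERPAGE_SIZE are invented stand-ins, and only the fallback logic (attempt one psind > 0 mapping, then retry the range as base pages when the pmap layer returns KERN_PROTECTION_FAILURE) mirrors the change above.

/*
 * Standalone sketch of the fallback path added to vm_fault_populate():
 * try one large (psind > 0) mapping and, if it is rejected with
 * KERN_PROTECTION_FAILURE (on amd64, because the range spans different
 * PKRU protection keys), retry the same range as individual base pages.
 *
 * fake_pmap_enter(), pkey_of(), and the size constants are illustrative
 * stand-ins, not real kernel interfaces.
 */
#include <assert.h>
#include <stdio.h>

#define	KERN_SUCCESS			0
#define	KERN_PROTECTION_FAILURE		2

#define	BASE_PAGE_SIZE	4096UL
#define	SUPERPAGE_SIZE	(512 * BASE_PAGE_SIZE)	/* 2 MB, like amd64 NBPDR */

/* Pretend the protection key changes halfway through the first superpage. */
static int
pkey_of(unsigned long va)
{
	return (va < SUPERPAGE_SIZE / 2 ? 1 : 0);
}

/* Stand-in for pmap_enter(): refuse a superpage that spans two keys. */
static int
fake_pmap_enter(unsigned long va, int psind)
{
	unsigned long size;

	size = psind > 0 ? SUPERPAGE_SIZE : BASE_PAGE_SIZE;
	if (pkey_of(va) != pkey_of(va + size - 1))
		return (KERN_PROTECTION_FAILURE);
	return (KERN_SUCCESS);
}

int
main(void)
{
	unsigned long vaddr;
	int i, npages, psind, rv;

	vaddr = 0;
	psind = 1;				/* try the superpage first */
	npages = SUPERPAGE_SIZE / BASE_PAGE_SIZE;

	rv = fake_pmap_enter(vaddr, psind);
	/* Mirrors the new MPASS(): only this one failure mode is expected. */
	assert(rv == KERN_SUCCESS ||
	    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
	if (psind > 0 && rv == KERN_PROTECTION_FAILURE) {
		/* Revert to base-page mappings, as the patch does. */
		for (i = 0; i < npages; i++) {
			rv = fake_pmap_enter(vaddr + i * BASE_PAGE_SIZE, 0);
			assert(rv == KERN_SUCCESS);
		}
		printf("superpage rejected; mapped %d base pages instead\n",
		    npages);
	} else {
		printf("mapped one %lu-byte superpage\n", SUPERPAGE_SIZE);
	}
	return (0);
}

Tolerating only KERN_PROTECTION_FAILURE in the assertion follows the new MPASS() in vm_fault.c: with amd64's pmap_enter_pde() now returning that specific error for a PKRU mismatch, any other failure from a psind > 0 pmap_enter() call would indicate a bug rather than a recoverable condition.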