Index: sys/arm/arm/pmap-v4.c
===================================================================
--- sys/arm/arm/pmap-v4.c
+++ sys/arm/arm/pmap-v4.c
@@ -2880,6 +2880,12 @@
 	}
 }
 
+void
+pmap_qenter_nx(vm_offset_t va, vm_page_t *m, int count)
+{
+	pmap_qenter(va, m, count);
+}
+
 /*
  * this routine jerks page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -1779,8 +1779,8 @@
  * over.  The page *must* be wired.
  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  */
-void
-pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+static void
+pmap_qenter_flags(vm_offset_t sva, vm_page_t *ma, int count, pt2_entry_t flags)
 {
 	u_int anychanged;
 	pt2_entry_t *epte2p, *pte2p, pte2;
@@ -1798,7 +1798,7 @@
 		    (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
 			anychanged++;
 			pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
-			    vm_page_pte2_attr(m)));
+			    vm_page_pte2_attr(m) | flags));
 		}
 		pte2p++;
 	}
@@ -1806,6 +1806,18 @@
 		tlb_flush_range(sva, count * PAGE_SIZE);
 }
 
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter_flags(sva, ma, count, 0);
+}
+
+void
+pmap_qenter_nx(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter_flags(sva, ma, count, PTE2_NX);
+}
+
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1236,6 +1236,12 @@
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
+void
+pmap_qenter_nx(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter(sva, ma, count);
+}
+
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -1663,8 +1663,8 @@
  * over.  The page *must* be wired.
  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  */
-void
-pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+static void
+pmap_qenter_flags(vm_offset_t sva, vm_page_t *ma, int count, pt_entry_t flags)
 {
 	pt_entry_t *endpte, oldpte, pa, *pte;
 	vm_page_t m;
@@ -1677,7 +1677,7 @@
 		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
-			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
+			pte_store(pte, pa | flags);
 		}
 		pte++;
 	}
@@ -1686,6 +1686,22 @@
 	    PAGE_SIZE);
 }
 
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter_flags(sva, ma, count, pgeflag | PG_RW | PG_V);
+}
+
+void
+pmap_qenter_nx(vm_offset_t sva, vm_page_t *ma, int count)
+{
+#if defined(PAE) || defined(PAE_TABLES)
+	pmap_qenter_flags(sva, ma, count, pgeflag | pg_nx | PG_RW | PG_V);
+#else
+	pmap_qenter(sva, ma, count);
+#endif
+}
+
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -927,6 +927,12 @@
 	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
 }
 
+void
+pmap_qenter_nx(vm_offset_t va, vm_page_t *m, int count)
+{
+	pmap_qenter(va, m, count);
+}
+
 /*
  * this routine jerks page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/powerpc/powerpc/pmap_dispatch.c
===================================================================
--- sys/powerpc/powerpc/pmap_dispatch.c
+++ sys/powerpc/powerpc/pmap_dispatch.c
@@ -310,6 +310,12 @@
 }
 
 void
+pmap_qenter_nx(vm_offset_t start, vm_page_t *m, int count)
+{
+	pmap_qenter(start, m, count);
+}
+
+void
 pmap_qremove(vm_offset_t start, int count)
 {
 
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -1011,8 +1011,8 @@
  * over.  The page *must* be wired.
  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  */
-void
-pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+static void
+pmap_qenter_flags(vm_offset_t sva, vm_page_t *ma, int count, pt_entry_t flags)
 {
 	pt_entry_t *l3, pa;
 	vm_offset_t va;
@@ -1028,7 +1028,7 @@
 		pn = (pa / PAGE_SIZE);
 		l3 = pmap_l3(kernel_pmap, va);
 
-		entry = (PTE_V | PTE_RWX);
+		entry = flags;
 		entry |= (pn << PTE_PPN0_S);
 		pmap_load_store(l3, entry);
 
@@ -1038,6 +1038,18 @@
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter_flags(sva, ma, count, PTE_V | PTE_RWX);
+}
+
+void
+pmap_qenter_nx(vm_offset_t sva, vm_page_t *ma, int count)
+{
+	pmap_qenter_flags(sva, ma, count, PTE_V | PTE_R | PTE_W);
+}
+
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
Index: sys/sparc64/sparc64/pmap.c
===================================================================
--- sys/sparc64/sparc64/pmap.c
+++ sys/sparc64/sparc64/pmap.c
@@ -1178,6 +1178,12 @@
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
+void
+pmap_qenter_nx(vm_offset_t sva, vm_page_t *m, int count)
+{
+	pmap_qenter(sva, m, count);
+}
+
 /*
  * Remove page mappings from kernel virtual address space.  Intended for
  * temporary mappings entered by pmap_qenter.