Index: head/sys/amd64/amd64/pmap.c
===================================================================
--- head/sys/amd64/amd64/pmap.c
+++ head/sys/amd64/amd64/pmap.c
@@ -604,8 +604,10 @@
 		    struct rwlock **lockp);
 static bool	pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
 		    u_int flags, struct rwlock **lockp);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 		    struct rwlock **lockp);
+#endif
 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 		    vm_offset_t va);
@@ -628,8 +630,10 @@
 		    pd_entry_t pde);
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 		    struct rwlock **lockp);
+#endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
 		    vm_prot_t prot);
 static void	pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
@@ -3359,6 +3363,7 @@
 	PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
  * replace the many pv entries for the 4KB page mappings by a single pv entry
@@ -3399,6 +3404,7 @@
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * First find and then destroy the pv entry for the specified pmap and virtual
@@ -4243,6 +4249,7 @@
 	PMAP_UNLOCK(pmap);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * Tries to promote the 512, contiguous 4KB page mappings that are within a
  * single page table page (PTP) to a single 2MB page mapping.  For promotion
@@ -4371,6 +4378,7 @@
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 	    " in pmap %p", va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -4599,6 +4607,7 @@
 
 unchanged:
 
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the page table page and the reservation are fully
 	 * populated, then attempt promotion.
@@ -4608,6 +4617,7 @@
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va, &lock);
+#endif
 
 	rv = KERN_SUCCESS;
 out:
@@ -7171,7 +7181,9 @@
 {
 	int rv;
 	struct rwlock *lock;
+#if VM_NRESERVLEVEL > 0
 	vm_page_t m, mpte;
+#endif
 	pd_entry_t *pde;
 	pt_entry_t *pte, PG_A, PG_M, PG_RW, PG_V;
 
@@ -7226,6 +7238,7 @@
 		*pte |= PG_A;
 	}
 
+#if VM_NRESERVLEVEL > 0
 	/* try to promote the mapping */
 	if (va < VM_MAXUSER_ADDRESS)
 		mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
@@ -7243,6 +7256,8 @@
 		atomic_add_long(&ad_emulation_superpage_promotions, 1);
 #endif
 	}
+#endif
+
 #ifdef INVARIANTS
 	if (ftype == VM_PROT_WRITE)
 		atomic_add_long(&num_dirty_emulations, 1);
Index: head/sys/arm/arm/pmap-v6.c
===================================================================
--- head/sys/arm/arm/pmap-v6.c
+++ head/sys/arm/arm/pmap-v6.c
@@ -3165,6 +3165,7 @@
 	} while (va < va_last);
 }
 
+#if VM_NRESERVLEVEL > 0
 static void
 pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 {
@@ -3198,6 +3199,7 @@
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif
 
 /*
  *  Conditionally create a pv entry.
@@ -3405,6 +3407,7 @@
 }
 #endif
 
+#if VM_NRESERVLEVEL > 0
 /*
  *  Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are
  *  within a single page table page (PT2) to a single 1MB page mapping.
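
[Reviewer note, not part of the patch] Every amd64 hunk above applies the same pattern: a promotion helper and each of its call sites are wrapped in one `#if VM_NRESERVLEVEL > 0` guard, so a kernel configured without superpage reservations compiles neither the helper nor the code that would call it. What follows is a minimal userland sketch of that pattern, not kernel code; NRESERVLEVEL stands in for the kernel's VM_NRESERVLEVEL option and promote() for pmap_promote_pde().

#include <stdio.h>

#ifndef NRESERVLEVEL
#define	NRESERVLEVEL	1	/* build with -DNRESERVLEVEL=0 to drop promotion */
#endif

#if NRESERVLEVEL > 0
/* Stand-in for pmap_promote_pde(): compiled only when reservations are. */
static void
promote(unsigned long va)
{

	printf("promoting mapping at %#lx\n", va);
}
#endif

int
main(void)
{
	unsigned long va = 0x200000;

#if NRESERVLEVEL > 0
	/* The call site is guarded by the same condition as the definition. */
	promote(va);
#else
	printf("no reservations; %#lx stays a set of 4KB mappings\n", va);
#endif
	return (0);
}

Because the guard removes the definition and the use together, neither configuration produces an unused-function or undefined-reference diagnostic.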
@@ -3532,6 +3535,7 @@
 	PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
 	    __func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  *  Zero L2 page table page.
@@ -4053,6 +4057,8 @@
 	    va, opte2, npte2);
 	}
 #endif
+
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the L2 page table page and the reservation are fully
 	 * populated, then attempt promotion.
@@ -4061,6 +4067,7 @@
 	    sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pte1(pmap, pte1p, va);
+#endif
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
Index: head/sys/arm64/arm64/pmap.c
===================================================================
--- head/sys/arm64/arm64/pmap.c
+++ head/sys/arm64/arm64/pmap.c
@@ -105,6 +105,8 @@
  * and to when physical maps must be made correct.
  */
 
+#include "opt_vm.h"
+
 #include <sys/param.h>
 #include <sys/bitstring.h>
 #include <sys/bus.h>
@@ -2677,6 +2679,7 @@
 	intr_restore(intr);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
  * replace the many pv entries for the 4KB page mappings by a single pv entry
@@ -2790,6 +2793,7 @@
 	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p",
 	    va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -3045,12 +3049,14 @@
 	    (prot & VM_PROT_EXECUTE) != 0)
 		cpu_icache_sync_range(va, PAGE_SIZE);
 
+#if VM_NRESERVLEVEL > 0
 		if ((mpte == NULL || mpte->wire_count == NL3PG) &&
 		    pmap_superpages_enabled() &&
 		    (m->flags & PG_FICTITIOUS) == 0 &&
 		    vm_reserv_level_iffullpop(m) == 0) {
 			pmap_promote_l2(pmap, pde, va, &lock);
 		}
+#endif
 	}
 
 	if (lock != NULL)
Index: head/sys/i386/i386/pmap.c
===================================================================
--- head/sys/i386/i386/pmap.c
+++ head/sys/i386/i386/pmap.c
@@ -100,6 +100,7 @@
 #include "opt_cpu.h"
 #include "opt_pmap.h"
 #include "opt_smp.h"
+#include "opt_vm.h"
 #include "opt_xbox.h"
 
 #include <sys/param.h>
@@ -288,7 +289,9 @@
 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
 static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
+#endif
 static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 		    vm_offset_t va);
@@ -309,7 +312,9 @@
 static void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void	pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
 static void	pmap_pde_attr(pd_entry_t *pde, int cache_bits);
+#if VM_NRESERVLEVEL > 0
 static void	pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
+#endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
 		    vm_prot_t prot);
 static void	pmap_pte_attr(pt_entry_t *pte, int cache_bits);
@@ -2504,6 +2509,7 @@
 	} while (va < va_last);
 }
 
+#if VM_NRESERVLEVEL > 0
 static void
 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 {
@@ -2537,6 +2543,7 @@
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 static void
 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
@@ -3312,6 +3319,7 @@
 	PMAP_UNLOCK(pmap);
 }
 
+#if VM_NRESERVLEVEL > 0
 /*
  * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
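
[Reviewer note, not part of the patch] The arm64 and i386 hunks also add #include "opt_vm.h", and that addition is load-bearing: VM_NRESERVLEVEL comes from the generated kernel-option header, and the C preprocessor evaluates any identifier that is still undefined inside a #if expression as 0. Without the include, every new `#if VM_NRESERVLEVEL > 0` block would be silently compiled out even on configurations that do use reservations, with no diagnostic. A standalone sketch of that failure mode follows; the commented-out #define stands in for including opt_vm.h.

#include <stdio.h>

/* #define VM_NRESERVLEVEL 1 */	/* what including "opt_vm.h" would provide */

int
main(void)
{

#if VM_NRESERVLEVEL > 0		/* an undefined identifier evaluates to 0 here */
	printf("promotion code compiled in\n");
#else
	printf("promotion code silently compiled out\n");
#endif
	return (0);
}

Compiled as-is this prints the second message; uncommenting the #define flips it, which is exactly the difference the new include makes.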
@@ -3448,6 +3456,7 @@
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 	    " in pmap %p", va, pmap);
 }
+#endif /* VM_NRESERVLEVEL > 0 */
 
 /*
  * Insert the given physical page (p) at
@@ -3664,6 +3673,7 @@
 		pte_store(pte, newpte);
 	}
 
+#if VM_NRESERVLEVEL > 0
 	/*
 	 * If both the page table page and the reservation are fully
 	 * populated, then attempt promotion.
@@ -3672,6 +3682,7 @@
 	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
+#endif
 
 	sched_unpin();
 	rw_wunlock(&pvh_global_lock);
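
[Reviewer note, not part of the patch] For readers following the guarded call sites across all four files: promotion is attempted only when the page table page is fully populated, superpages are administratively enabled, the page is not fictitious, and vm_reserv_level_iffullpop() reports a fully populated reservation. The userland model below restates that four-part test with stub types and invented names (vm_page_stub, should_try_promotion); it is a sketch of the logic this patch leaves guarded, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define	NPTEPG		512	/* PTEs per page table page (amd64 value) */
#define	PG_FICTITIOUS	0x04	/* stub flag value for this sketch only */

struct vm_page_stub {
	int	wire_count;	/* populated PTEs when used as a PTP */
	int	flags;
	bool	reserv_fullpop;	/* models vm_reserv_level_iffullpop() == 0 */
};

static bool pg_ps_enabled = true;	/* superpage knob, cf. vm.pmap.pg_ps_enabled */

/* Returns true when all four promotion preconditions hold. */
static bool
should_try_promotion(struct vm_page_stub *ptp, struct vm_page_stub *m)
{

	return (ptp->wire_count == NPTEPG && pg_ps_enabled &&
	    (m->flags & PG_FICTITIOUS) == 0 && m->reserv_fullpop);
}

int
main(void)
{
	struct vm_page_stub ptp = { .wire_count = NPTEPG };
	struct vm_page_stub m = { .flags = 0, .reserv_fullpop = true };

	printf("attempt promotion: %s\n",
	    should_try_promotion(&ptp, &m) ? "yes" : "no");
	return (0);
}

Under `#if VM_NRESERVLEVEL > 0` equal to 0 the reservation system does not exist, the fourth condition can never be satisfied, and the whole test is dead code, which is why the patch can safely compile it out.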