Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -6747,8 +6747,7 @@
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -7262,7 +7261,7 @@
 {
 	pt_entry_t newpte, *pte, PG_V;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PG_V = pmap_valid_bit(pmap);
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -3873,8 +3873,7 @@
 	KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
 	    ("%s: invalid to pmap_enter page table pages (va: 0x%x)",
 	    __func__, va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("%s: managed mapping within the clean submap", __func__));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4535,7 +4534,7 @@
 	struct spglist free;
 	uint32_t l2prot;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("%s: managed mapping within the clean submap", __func__));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -4438,7 +4438,7 @@
 	vm_paddr_t pa;
 	int lvl;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -3654,7 +3654,7 @@
 	    ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)",
 	    va));
 	KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 ||
-	    va < kmi.clean_sva || va >= kmi.clean_eva,
+	    !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4108,8 +4108,8 @@
 {
 	pt_entry_t newpte, *pte;
 
-	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0,
+	KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) ||
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -2125,8 +2125,7 @@
 
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -2328,7 +2327,7 @@
 	pt_entry_t *pte, npte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -2813,8 +2813,7 @@
 	CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
 	    m, prot, flags, psind);
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3298,7 +3297,7 @@
 	pt_entry_t *pte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -3103,7 +3103,7 @@
 	pd_entry_t *l2;
 	pt_entry_t *l3, newl3;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_LOCKED);
Index: sys/vm/vm.h
===================================================================
--- sys/vm/vm.h
+++ sys/vm/vm.h
@@ -145,6 +145,9 @@
 	vm_offset_t clean_eva;
 };
 
+#define	VA_IS_CLEANMAP(va)	\
+	((va) >= kmi.clean_sva && (va) < kmi.clean_eva)
+
 extern struct kva_md_info kmi;
 
 extern void vm_ksubmap_init(struct kva_md_info *);
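
For reference, a minimal standalone sketch of the transformation (not part of the patch): it declares a stand-in for the kmi structure from sys/vm/vm.h with hypothetical submap bounds, defines the new VA_IS_CLEANMAP() macro as above, and checks that !VA_IS_CLEANMAP(va) is equivalent to the open-coded "va < kmi.clean_sva || va >= kmi.clean_eva" test that the KASSERTs previously spelled out. The range is half-open, [clean_sva, clean_eva), so the negation follows from De Morgan's law.

#include <assert.h>
#include <stdbool.h>

/* Stand-in for struct kva_md_info from sys/vm/vm.h (illustration only). */
static struct {
	unsigned long clean_sva;
	unsigned long clean_eva;
} kmi = { 0x1000, 0x9000 };	/* hypothetical clean submap bounds */

/* New macro: va lies within the half-open clean submap [clean_sva, clean_eva). */
#define	VA_IS_CLEANMAP(va)	\
	((va) >= kmi.clean_sva && (va) < kmi.clean_eva)

int
main(void)
{
	unsigned long va;

	/*
	 * The patched KASSERTs replace the open-coded range test with
	 * !VA_IS_CLEANMAP(va); the two forms agree at every address,
	 * including the boundary values clean_sva and clean_eva.
	 */
	for (va = 0; va < 0xa000; va += 0x800) {
		bool old_form = va < kmi.clean_sva || va >= kmi.clean_eva;
		bool new_form = !VA_IS_CLEANMAP(va);
		assert(old_form == new_form);
	}
	return (0);
}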