Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -7133,12 +7133,9 @@
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -7201,14 +7198,10 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	vm_page_assert_busied(m);
+	if (!pmap_page_is_write_mapped(m))
 		return;
+
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -7681,16 +7674,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
Index: sys/arm/arm/pmap-v4.c
===================================================================
--- sys/arm/arm/pmap-v4.c
+++ sys/arm/arm/pmap-v4.c
@@ -4095,16 +4088,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
@@ -4136,14 +4129,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
+	if (pmap_page_is_write_mapped(m))
 		pmap_clearbit(m, PVF_WRITE);
 }
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c
+++ sys/arm/arm/pmap-v6.c
@@ -5192,12 +5192,9 @@
 	    ("%s: page %p is not managed", __func__, m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTE2s can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -5533,14 +5530,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is not managed", __func__, m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -5691,17 +5683,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("%s: page %p is not managed", __func__, m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("%s: page %p is exclusive busy", __func__, m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM
-	 * cleared. If the object containing the page is locked and the page
-	 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently
-	 * set.
-	 */
-	if ((m->flags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -4529,12 +4529,9 @@
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -4593,14 +4590,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4968,16 +4960,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have ATTR_SW_DBM
-	 * set. If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -4843,12 +4843,9 @@
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4972,14 +4969,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -5282,16 +5274,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c
+++ sys/mips/mips/pmap.c
@@ -2927,14 +2927,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2994,13 +2989,11 @@
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PTE_D set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
+
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_testbit(m, PTE_D);
 	rw_wunlock(&pvh_global_lock);
@@ -3135,15 +3128,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
-	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
Index: sys/powerpc/aim/mmu_oea.c
===================================================================
--- sys/powerpc/aim/mmu_oea.c
+++ sys/powerpc/aim/mmu_oea.c
@@ -1314,13 +1314,11 @@
 	    ("moea_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PTE_CHG set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
+
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_CHG);
 	rw_wunlock(&pvh_global_lock);
@@ -1346,16 +1344,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("moea_clear_modify: page %p is exclusive busy", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
-	 * set. If the object containing the page is locked and the page is
-	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	moea_clear_bit(m, PTE_CHG);
@@ -1375,14 +1366,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1683,13 +1683,11 @@
 	    ("moea64_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have LPTE_CHG set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_LOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
+
 	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }
@@ -1713,16 +1711,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("moea64_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
-	 * set. If the object containing the page is locked and the page is
-	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	moea64_clear_bit(mmu, m, LPTE_CHG);
 }
@@ -1739,15 +1730,11 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	powerpc_sync();
 	PV_PAGE_LOCK(m);
 	refchg = 0;
Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c
+++ sys/powerpc/booke/pmap.c
@@ -2687,15 +2687,10 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
+	if (!pmap_page_is_write_mapped(m))
+		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -3035,13 +3030,11 @@
 	rv = FALSE;
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can be modified.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return (rv);
+	if (!pmap_page_is_write_mapped(m))
+		return (FALSE);
+
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
@@ -3110,17 +3103,11 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("mmu_booke_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
-	/*
-	 * If the page is not PG_AWRITEABLE, then no PTEs can be modified.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
 		PMAP_LOCK(pv->pv_pmap);
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -3784,12 +3784,9 @@
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no PTEs can have PG_M set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -3848,14 +3845,9 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4106,9 +4098,10 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
Index: sys/sparc64/sparc64/pmap.c
===================================================================
--- sys/sparc64/sparc64/pmap.c
+++ sys/sparc64/sparc64/pmap.c
@@ -2116,12 +2116,9 @@
 	rv = FALSE;
 
 	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
-	 * is clear, no TTEs can have TD_W set.
+	 * If the page is not busied then this check is racy.
 	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return (rv);
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2195,17 +2192,11 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_xbusied(m),
-	    ("pmap_clear_modify: page %p is exclusive busied", m));
+	vm_page_assert_busied(m);
 
-	/*
-	 * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
-	 * If the object containing the page is locked and the page is not
-	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
-	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if (!pmap_page_is_write_mapped(m))
 		return;
+
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
@@ -2225,15 +2216,11 @@
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
+	vm_page_assert_busied(m);
+
+	if (!pmap_page_is_write_mapped(m))
+		return;
 
-	/*
-	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
-	 * set by another thread while the object is locked. Thus,
-	 * if PGA_WRITEABLE is clear, no page table entries need updating.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
-		return;
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -662,6 +662,11 @@
 void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 #endif
 
+#define	vm_page_assert_busied(m)					\
+	KASSERT(vm_page_busied(m),					\
+	    ("vm_page_assert_busied: page %p not busy @ %s:%d",	\
+	    (m), __FILE__, __LINE__))
+
 #define	vm_page_assert_sbusied(m)					\
 	KASSERT(vm_page_sbusied(m),					\
 	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
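Note on the substitution used throughout the hunks above: pmap_page_is_write_mapped() is the existing wrapper around the PGA_WRITEABLE test, so each converted function keeps the same early-return check while the new vm_page_assert_busied() macro from the vm_page.h hunk documents the busy-lock precondition. The sketch below is illustrative only: the macro body is assumed to match the sys/vm/pmap.h definition at the time of this change, and the helper example_revoke_write_access() is a hypothetical caller, not code from this patch.

/* Assumed definition, from sys/vm/pmap.h (not part of this diff): */
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)

/*
 * Hypothetical caller: after this change the write-related pmap routines
 * expect the page to be busied (shared or exclusive busy both satisfy
 * vm_page_assert_busied()) rather than the owning object to be
 * write-locked.
 */
static void
example_revoke_write_access(vm_page_t m)
{

	vm_page_assert_busied(m);	/* caller already holds the busy lock */
	pmap_remove_write(m);		/* returns early if no writeable mappings */
}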