Index: sys/vm/vm_pageout.c
===================================================================
--- sys/vm/vm_pageout.c
+++ sys/vm/vm_pageout.c
@@ -402,6 +402,8 @@
 	 */
 	vm_page_assert_unbusied(m);
 	KASSERT(m->hold_count == 0, ("page %p is held", m));
+
+	pmap_remove_write(m);
 	vm_page_unlock(m);
 
 	mc[vm_pageout_page_count] = pb = ps = m;
@@ -444,6 +446,7 @@
 				ib = 0;
 				break;
 			}
+			pmap_remove_write(p);
 			vm_page_unlock(p);
 			mc[--page_base] = pb = p;
 			++pageout_count;
@@ -469,6 +472,7 @@
 			vm_page_unlock(p);
 			break;
 		}
+		pmap_remove_write(p);
 		vm_page_unlock(p);
 		mc[page_base + pageout_count] = ps = p;
 		++pageout_count;
@@ -513,8 +517,8 @@
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	/*
-	 * Initiate I/O.  Bump the vm_page_t->busy counter and
-	 * mark the pages read-only.
+	 * Initiate I/O.  Mark the pages busy and verify that they're valid
+	 * and read-only.
 	 *
 	 * We do not have to fixup the clean/dirty bits here... we can
 	 * allow the pager to do it after the I/O completes.
@@ -526,8 +530,9 @@
 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
 			mc[i], i, count));
+		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
+		    ("vm_pageout_flush: writeable page %p", mc[i]));
 		vm_page_sbusy(mc[i]);
-		pmap_remove_write(mc[i]);
 	}
 
 	vm_object_pip_add(object, count);
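
For context (not part of the patch): below is a minimal userspace sketch of the ordering invariant this change establishes, namely that the producer (vm_pageout_cluster()) write-protects each page while it still holds the page lock, so the consumer (vm_pageout_flush()) can merely assert that PGA_WRITEABLE is clear rather than calling pmap_remove_write() itself. The names page_model, model_remove_write, cluster_one_page, and flush_pages are hypothetical stand-ins for illustration only; they do not model the real VM locking or pmap code.

/*
 * Toy model (hypothetical, not FreeBSD code): enforce "write-protect
 * before unlocking" on the producer side and assert it on the consumer
 * side, mirroring the division of labor introduced by the patch.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	bool locked;		/* stands in for the page lock */
	bool writeable;		/* stands in for PGA_WRITEABLE */
};

/* Stand-in for pmap_remove_write(): only legal while the page is locked. */
static void
model_remove_write(struct page_model *p)
{
	assert(p->locked);
	p->writeable = false;
}

/* Producer side, like vm_pageout_cluster(): protect, then drop the lock. */
static void
cluster_one_page(struct page_model *p)
{
	model_remove_write(p);
	p->locked = false;
}

/* Consumer side, like vm_pageout_flush(): assert the invariant, don't fix it. */
static void
flush_pages(struct page_model *pages, int count)
{
	for (int i = 0; i < count; i++)
		assert(!pages[i].writeable);
	printf("flushing %d write-protected page(s)\n", count);
}

int
main(void)
{
	struct page_model pages[2] = {
		{ .locked = true, .writeable = true },
		{ .locked = true, .writeable = true },
	};

	for (int i = 0; i < 2; i++)
		cluster_one_page(&pages[i]);
	flush_pages(pages, 2);
	return (0);
}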