Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -520,8 +520,8 @@
  *	vm_page_alloc_freelist().  Some functions support only a subset
  *	of the flags, and ignore others, see the flags legend.
  *
- * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
- * and the vm_page_grab*() functions.  See these functions for details.
+ * The vm_page_grab*() functions will set the page state to valid if
+ * VM_ALLOC_ZERO is set.
  *
  * Bits 0 - 1 define class.
  * Bits 2 - 15 dedicated for flags.
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -831,28 +831,11 @@
 vm_page_trybusy(vm_page_t m, int allocflags)
 {
 
-	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
-		return (vm_page_trysbusy(m));
-	else
+	if ((allocflags & VM_ALLOC_ZERO) != 0 ||
+	    (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0)
 		return (vm_page_tryxbusy(m));
-}
-
-/*
- *	vm_page_tryacquire
- *
- *	Helper routine for grab functions to trylock busy and wire.
- *
- *	Returns true on success and false on failure.
- */
-static inline bool
-vm_page_tryacquire(vm_page_t m, int allocflags)
-{
-	bool locked;
-
-	locked = vm_page_trybusy(m, allocflags);
-	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
-	return (locked);
+	else
+		return (vm_page_trysbusy(m));
 }
 
 /*
@@ -867,16 +850,20 @@
 	vm_object_t obj;
 	bool locked;
 
+	KASSERT((allocflags & (VM_ALLOC_ZERO | VM_ALLOC_WIRED)) == 0,
+	    ("vm_page_busy_acquire: invalid alloc flags"));
+	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0,
+	    ("vm_page_busy_acquire: the page must be busied"));
+
 	/*
-	 * The page-specific object must be cached because page
-	 * identity can change during the sleep, causing the
-	 * re-lock of a different object.
-	 * It is assumed that a reference to the object is already
+	 * The page-specific object must be cached because page identity
+	 * can change during the sleep, causing the re-lock of a different
+	 * object.  It is assumed that a reference to the object is already
 	 * held by the callers.
 	 */
 	obj = m->object;
 	for (;;) {
-		if (vm_page_tryacquire(m, allocflags))
+		if (vm_page_trybusy(m, allocflags))
 			return (true);
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (false);
@@ -4281,16 +4268,37 @@
  *
  *	Helper routine for grab functions to release busy on return.
  */
-static inline void
+static void
 vm_page_grab_release(vm_page_t m, int allocflags)
 {
 
-	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
-		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
-			vm_page_sunbusy(m);
+	if ((allocflags & VM_ALLOC_WIRED) != 0)
+		vm_page_wire(m);
+
+	/* Zero and validate the page if the caller requested it. */
+	if (!vm_page_all_valid(m) && (allocflags & VM_ALLOC_ZERO) != 0) {
+		if ((m->flags & PG_ZERO) != 0)
+			vm_page_valid(m);
 		else
-			vm_page_xunbusy(m);
+			vm_page_zero_invalid(m, TRUE);
 	}
+
+	/*
+	 * Clear PG_ZERO so that future grab calls on a partially valid
+	 * page do not mistake it for zeroed.
+	 */
+	m->flags &= ~PG_ZERO;
+
+	/*
+	 * Correct the busy state according to the caller's requirements.
+	 * Grab acquires the page exclusive busied for ZERO or NOBUSY
+	 * because a busy lock is required to wire or validate the page.
+	 * The caller expects the requested state on return.
+	 */
+	if ((allocflags & VM_ALLOC_NOBUSY) != 0)
+		vm_page_busy_release(m);
+	else if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
+		vm_page_busy_downgrade(m);
 }
 
 /*
@@ -4351,12 +4358,20 @@
 {
 	int pflags;
 
+	/*
+	 * Drop the flags that are handled directly by the grab functions.
+	 */
 	pflags = allocflags &
 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
-	    VM_ALLOC_NOBUSY);
+	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_SBUSY);
+
+	/* Grab will loop internally on allocation failure. */
 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 		pflags |= VM_ALLOC_WAITFAIL;
-	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
+
+	/* If the flags are compatible, use SBUSY for the allocation. */
+	if ((allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_ZERO)) ==
+	    VM_ALLOC_IGN_SBUSY)
 		pflags |= VM_ALLOC_SBUSY;
 
 	return (pflags);
@@ -4383,7 +4398,7 @@
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
-		if (!vm_page_tryacquire(m, allocflags)) {
+		if (!vm_page_trybusy(m, allocflags)) {
 			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true))
 				goto retrylookup;
@@ -4399,9 +4414,6 @@
 			return (NULL);
 		goto retrylookup;
 	}
-	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
-
 out:
 	vm_page_grab_release(m, allocflags);
 
@@ -4465,8 +4477,6 @@
 		    allocflags, false))
 			return (false);
 	}
-	if ((allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
 	vm_page_grab_release(m, allocflags);
 	*mp = m;
 	return (true);
@@ -4525,9 +4535,9 @@
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
-	    VM_ALLOC_WIRED);
-	pflags |= VM_ALLOC_WAITFAIL;
+	/* We never want to allocate the page non-busied here. */
+	pflags = vm_page_grab_pflags(
+	    allocflags & ~(VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY));
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
@@ -4599,15 +4609,11 @@
 			vm_page_readahead_finish(ma[i]);
 		MPASS(vm_page_all_valid(m));
 	} else {
+		/* Unlike the other grab functions, zeroing is implied here. */
 		vm_page_zero_invalid(m, TRUE);
 	}
 out:
-	if ((allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
-	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
-		vm_page_busy_downgrade(m);
-	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
-		vm_page_busy_release(m);
+	vm_page_grab_release(m, allocflags);
 	*mp = m;
 	return (VM_PAGER_OK);
 }
@@ -4641,8 +4647,6 @@
 		return (VM_PAGER_FAIL);
 	if ((m = *mp) != NULL) {
 		if (vm_page_all_valid(m)) {
-			if ((allocflags & VM_ALLOC_WIRED) != 0)
-				vm_page_wire(m);
 			vm_page_grab_release(m, allocflags);
 			return (VM_PAGER_OK);
 		}
@@ -4712,7 +4716,7 @@
 		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
-			if (!vm_page_tryacquire(m, allocflags)) {
+			if (!vm_page_trybusy(m, allocflags)) {
 				if (vm_page_grab_sleep(object, m, pindex,
 				    "grbmaw", allocflags, true))
 					goto retrylookup;
@@ -4730,12 +4734,6 @@
 				goto retrylookup;
 			}
 		}
-		if (vm_page_none_valid(m) &&
-		    (allocflags & VM_ALLOC_ZERO) != 0) {
-			if ((m->flags & PG_ZERO) == 0)
-				pmap_zero_page(m);
-			vm_page_valid(m);
-		}
 		vm_page_grab_release(m, allocflags);
 		ma[i] = mpred = m;
 		m = vm_page_next(m);
@@ -4752,29 +4750,17 @@
     int allocflags, vm_page_t *ma, int count)
 {
 	vm_page_t m, pred;
-	int flags;
 	int i;
 
 	vm_page_grab_check(allocflags);
-
-	/*
-	 * Modify flags for lockless acquire to hold the page until we
-	 * set it valid if necessary.
- */ - flags = allocflags & ~VM_ALLOC_NOBUSY; pred = NULL; for (i = 0; i < count; i++, pindex++) { - if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags)) + if (!vm_page_acquire_unlocked(object, pindex, pred, &m, + allocflags)) return (i); if (m == NULL) break; - if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) { - if ((m->flags & PG_ZERO) == 0) - pmap_zero_page(m); - vm_page_valid(m); - } /* m will still be wired or busy according to flags. */ - vm_page_grab_release(m, allocflags); pred = ma[i] = m; } if ((allocflags & VM_ALLOC_NOCREAT) != 0) @@ -5212,6 +5198,12 @@ int b; int i; + /* Short circuit for all invalid. */ + if (vm_page_none_valid(m)) { + pmap_zero_page(m); + goto out; + } + /* * Scan the valid bits looking for invalid sections that * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the @@ -5228,7 +5220,7 @@ b = i + 1; } } - +out: /* * setvalid is TRUE when we can safely set the zero'd areas * as being valid. We can do this if there are no cache consistancy