sys/vm/vm_page.c
Show First 20 Lines • Show All 825 Lines • ▼ Show 20 Lines
  * Helper routine for grab functions to trylock busy.
  *
  * Returns true on success and false on failure.
  */
 static bool
 vm_page_trybusy(vm_page_t m, int allocflags)
 {
-	if ((allocflags & (VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY)) != 0)
-		return (vm_page_trysbusy(m));
-	else
-		return (vm_page_tryxbusy(m));
+	if ((allocflags & VM_ALLOC_ZERO) != 0 ||
+	    (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0)
+		return (vm_page_tryxbusy(m));
+	else
+		return (vm_page_trysbusy(m));
 }
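
To make the revised branch easier to audit: VM_ALLOC_ZERO now forces the exclusive path because zeroing and validating the page require xbusy. A minimal userspace sketch of just the flag logic; the flag values and the wants_xbusy() name are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag values for illustration only; not the kernel's. */
#define	VM_ALLOC_SBUSY		0x1
#define	VM_ALLOC_IGN_SBUSY	0x2
#define	VM_ALLOC_ZERO		0x4

/*
 * Mirrors the revised vm_page_trybusy() decision: exclusive busy is
 * required to zero or validate a page, so VM_ALLOC_ZERO forces the
 * xbusy path even when a shared busy was otherwise acceptable.
 */
static bool
wants_xbusy(int allocflags)
{
	return ((allocflags & VM_ALLOC_ZERO) != 0 ||
	    (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0);
}

int
main(void)
{
	printf("SBUSY alone   -> %s\n",
	    wants_xbusy(VM_ALLOC_SBUSY) ? "xbusy" : "sbusy");
	printf("SBUSY | ZERO  -> %s\n",
	    wants_xbusy(VM_ALLOC_SBUSY | VM_ALLOC_ZERO) ? "xbusy" : "sbusy");
	printf("no busy flags -> %s\n",
	    wants_xbusy(0) ? "xbusy" : "sbusy");
	return (0);
}

Run, this prints sbusy, xbusy, xbusy: only an SBUSY request without ZERO takes the shared path.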
 /*
- * vm_page_tryacquire
- *
- * Helper routine for grab functions to trylock busy and wire.
- *
- * Returns true on success and false on failure.
- */
-static inline bool
-vm_page_tryacquire(vm_page_t m, int allocflags)
-{
-	bool locked;
-
-	locked = vm_page_trybusy(m, allocflags);
-	if (locked && (allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
-	return (locked);
-}
-
-/*
  * vm_page_busy_acquire:
  *
  *	Acquire the busy lock as described by VM_ALLOC_* flags. Will loop
  *	and drop the object lock if necessary.
  */
 bool
 vm_page_busy_acquire(vm_page_t m, int allocflags)
 {
 	vm_object_t obj;
 	bool locked;
 
+	KASSERT((allocflags & (VM_ALLOC_ZERO | VM_ALLOC_WIRED)) == 0,
+	    ("vm_page_busy_acquire: Invalid alloc flag."));
+	KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0,
+	    ("vm_page_busy_acquire: the pages must be busied"));
+
 	/*
-	 * The page-specific object must be cached because page
-	 * identity can change during the sleep, causing the
-	 * re-lock of a different object.
-	 * It is assumed that a reference to the object is already
+	 * The page-specific object must be cached because page identity
+	 * can change during the sleep, causing the re-lock of a different
+	 * object.  It is assumed that a reference to the object is already
 	 * held by the callers.
 	 */
 	obj = m->object;
 	for (;;) {
-		if (vm_page_tryacquire(m, allocflags))
+		if (vm_page_trybusy(m, allocflags))
 			return (true);
 		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 			return (false);
 		if (obj != NULL)
 			locked = VM_OBJECT_WOWNED(obj);
 		else
 			locked = false;
 		MPASS(locked || vm_page_wired(m));
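
The loop above follows a common trylock-and-retry shape: try the busy lock, fail fast only for VM_ALLOC_NOWAIT callers, otherwise sleep and retry, with m->object cached beforehand because the page's identity may change across the sleep. A minimal userspace analogue of that shape, using a C11 atomic flag as a hypothetical stand-in for the busy word; the fake_* names are invented for illustration, and the kernel sleeps on the busy lock rather than yielding.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for a page's busy word. */
struct fake_page {
	atomic_flag busy;
};

static bool
fake_trybusy(struct fake_page *p)
{
	/* Succeeds only if the flag was clear, like a trylock. */
	return (!atomic_flag_test_and_set_explicit(&p->busy,
	    memory_order_acquire));
}

/*
 * Loop-and-retry shape of vm_page_busy_acquire(): try, optionally
 * fail fast (NOWAIT), otherwise yield and retry.
 */
static bool
fake_busy_acquire(struct fake_page *p, bool nowait)
{
	for (;;) {
		if (fake_trybusy(p))
			return (true);
		if (nowait)
			return (false);
		sched_yield();
	}
}

int
main(void)
{
	struct fake_page p = { ATOMIC_FLAG_INIT };

	(void)fake_busy_acquire(&p, false);	/* succeeds immediately */
	/* Second, NOWAIT attempt fails because the lock is held. */
	return (fake_busy_acquire(&p, true) ? 1 : 0);
}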
▲ Show 20 Lines • Show All 3,386 Lines • ▼ Show 20 Lines

 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
 	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
 
 /*
  * vm_page_grab_release
  *
  * Helper routine for grab functions to release busy on return.

    markj: This is stale now. Maybe, "Helper routine for grab functions to finalize page state before returning."

  */
-static inline void
+static void
 vm_page_grab_release(vm_page_t m, int allocflags)
 {
-	if ((allocflags & VM_ALLOC_NOBUSY) != 0) {
-		if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
-			vm_page_sunbusy(m);
-		else
-			vm_page_xunbusy(m);
-	}
+	if ((allocflags & VM_ALLOC_WIRED) != 0)
+		vm_page_wire(m);
+
+	/* Zero and validate the page if the caller requested. */
+	if (!vm_page_all_valid(m) && (allocflags & VM_ALLOC_ZERO) != 0) {
+		if ((m->flags & PG_ZERO) != 0)
+			vm_page_valid(m);
+		else
+			vm_page_zero_invalid(m, TRUE);
+	}
+
+	/*
+	 * Clear PG_ZERO so future grab calls with partially valid pages
+	 * do not mistake the page for zeroed.
+	 */
+	m->flags &= ~PG_ZERO;

    markj (inline, unsubmitted): Why do we do this outside the block above? PG_ZERO will only be set if the page is freshly allocated and VM_ALLOC_ZERO was requested. Note that there is nothing synchronizing this update in general. In practice it is ok because in the scenario above we will be holding the object lock.

    jeff (author, unsubmitted): We are not actually guaranteed to have the object lock. We are guaranteed to have an exclusive busy in the block above. Let me look more closely at flags and think about this.

+	/*
+	 * Correct the busy state according to caller requirements. Grab
+	 * will upgrade busy for ZERO or NOBUSY because a busy lock is
+	 * required to wire or validate the page. The caller will expect
+	 * the requested state on return.
+	 */
+	if ((allocflags & VM_ALLOC_NOBUSY) != 0)
+		vm_page_busy_release(m);
+	else if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
+		vm_page_busy_downgrade(m);
 }
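
The ordering is the point of the review exchange above: wire first, zero and validate under the exclusive busy that grab took, clear PG_ZERO unconditionally so a later grab of a now partially valid page cannot assume it is still zero-filled, and only then relax busy to the caller's requested state. A small userspace model of that sequencing; the fake_* types, flag values, and helper names are stand-ins for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in constants and page model; values and names are illustrative. */
#define	PG_ZERO		0x1
#define	VM_ALLOC_ZERO	0x2
#define	VM_ALLOC_WIRED	0x4

struct fake_page {
	int flags;	/* PG_ZERO if freshly zero-filled */
	bool all_valid;	/* all DEV_BSIZE chunks valid */
	int wire_count;
};

static void
fake_zero_invalid(struct fake_page *m)
{
	/* The kernel zeroes only the invalid DEV_BSIZE chunks here. */
	m->all_valid = true;
}

/* Finalize sequence modeled on the revised vm_page_grab_release(). */
static void
fake_grab_release(struct fake_page *m, int allocflags)
{
	if ((allocflags & VM_ALLOC_WIRED) != 0)
		m->wire_count++;
	if (!m->all_valid && (allocflags & VM_ALLOC_ZERO) != 0) {
		if ((m->flags & PG_ZERO) != 0)
			m->all_valid = true;	/* fresh page, already zeroed */
		else
			fake_zero_invalid(m);	/* zero the stale ranges */
	}
	/*
	 * Cleared outside the block above: even when this caller did not
	 * ask for ZERO, a partially valid page must not keep PG_ZERO, or
	 * a later VM_ALLOC_ZERO grab would skip the zeroing it needs.
	 */
	m->flags &= ~PG_ZERO;
}

int
main(void)
{
	struct fake_page m = { .flags = PG_ZERO };

	fake_grab_release(&m, VM_ALLOC_ZERO | VM_ALLOC_WIRED);
	printf("valid=%d wired=%d pg_zero=%d\n",
	    m.all_valid, m.wire_count, (m.flags & PG_ZERO) != 0);
	return (0);
}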
 /*
  * vm_page_grab_sleep
  *
  *	Sleep for busy according to VM_ALLOC_ parameters. Returns true
  *	if the caller should retry and false otherwise.
  *
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines
 /*
  * Calculate the page allocation flags for grab.
  */
 static inline int
 vm_page_grab_pflags(int allocflags)
 {
 	int pflags;
 
+	/*
+	 * Drop flags handled directly by grab functions.
+	 */
 	pflags = allocflags &
 	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
-	    VM_ALLOC_NOBUSY);
+	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_SBUSY);
+
+	/* Grab will loop internally on failure. */
 	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 		pflags |= VM_ALLOC_WAITFAIL;
-	if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0)
+
+	/* If flags are compatible use SBUSY. */
+	if ((allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_ZERO)) ==
+	    VM_ALLOC_IGN_SBUSY)
 		pflags |= VM_ALLOC_SBUSY;
 	return (pflags);
 }
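
The rewritten helper now also strips the flags that grab finalizes itself (wiring and busy-state fixup), converts "caller will wait" into WAITFAIL so grab can loop internally, and asks the allocator for a shared busy only when IGN_SBUSY is set without ZERO. A runnable sketch that mirrors the diff's logic; the flag values are illustrative stand-ins, not the kernel's encoding.

#include <stdio.h>

/* Stand-in flag values for illustration; not the kernel's encoding. */
#define	VM_ALLOC_NOWAIT		0x001
#define	VM_ALLOC_WAITOK		0x002
#define	VM_ALLOC_WAITFAIL	0x004
#define	VM_ALLOC_NOBUSY		0x008
#define	VM_ALLOC_WIRED		0x010
#define	VM_ALLOC_SBUSY		0x020
#define	VM_ALLOC_IGN_SBUSY	0x040
#define	VM_ALLOC_ZERO		0x080

/* Mirrors the revised vm_page_grab_pflags() flag rewrite. */
static int
grab_pflags(int allocflags)
{
	int pflags;

	/* Drop flags handled directly by the grab functions. */
	pflags = allocflags &
	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_SBUSY);
	/* Grab loops internally, so a failed allocation may only sleep. */
	if ((allocflags & VM_ALLOC_NOWAIT) == 0)
		pflags |= VM_ALLOC_WAITFAIL;
	/* SBUSY from the allocator is compatible only without ZERO. */
	if ((allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_ZERO)) ==
	    VM_ALLOC_IGN_SBUSY)
		pflags |= VM_ALLOC_SBUSY;
	return (pflags);
}

int
main(void)
{
	printf("0x%x\n", grab_pflags(VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY));
	printf("0x%x\n", grab_pflags(VM_ALLOC_IGN_SBUSY | VM_ALLOC_ZERO));
	return (0);
}

The first call prints 0x64 (SBUSY added, WIRED stripped); the second prints 0xc4 (ZERO suppresses the SBUSY request).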
 /*
  * Grab a page, waiting until we are woken up due to the page
  * changing state. We keep on waiting if the page continues
Show All 10 Lines
 {
 	vm_page_t m;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	vm_page_grab_check(allocflags);
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
-		if (!vm_page_tryacquire(m, allocflags)) {
+		if (!vm_page_trybusy(m, allocflags)) {
 			if (vm_page_grab_sleep(object, m, pindex, "pgrbwt",
 			    allocflags, true))
 				goto retrylookup;
 			return (NULL);
 		}
 		goto out;
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (NULL);
 	m = vm_page_alloc(object, pindex, vm_page_grab_pflags(allocflags));
 	if (m == NULL) {
 		if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
 			return (NULL);
 		goto retrylookup;
 	}
-	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 out:
 	vm_page_grab_release(m, allocflags);
 	return (m);
 }
 /*
  * Locklessly attempt to acquire a page given a (object, pindex) tuple
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines

 		if (vm_page_trybusy(m, allocflags)) {
 			vm_page_busy_release(m);
 			cpu_spinwait();
 			continue;
 		}
 		if (!vm_page_grab_sleep(object, m, pindex, "pgnslp",
 		    allocflags, false))
 			return (false);
 	}
-	if ((allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
 	vm_page_grab_release(m, allocflags);
 	*mp = m;
 	return (true);
 }
 /*
  * Try to locklessly grab a page and fall back to the object lock if NOCREAT
  * is not set.
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines

 vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex, int allocflags)
 	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 	KASSERT((allocflags &
 	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
 	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
-	pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
-	    VM_ALLOC_WIRED);
-	pflags |= VM_ALLOC_WAITFAIL;
+	/* We never want to allocate non-busy pages. */
+	pflags = vm_page_grab_pflags(
+	    allocflags & ~(VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY));
 
 retrylookup:
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		/*
 		 * If the page is fully valid it can only become invalid
 		 * with the object lock held. If it is not valid it can
 		 * become valid with the busy lock held. Therefore, we
 		 * may unnecessarily lock the exclusive busy here if we
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines

 	if (rv != VM_PAGER_OK) {
 		}
 		*mp = NULL;
 		return (rv);
 	}
 	for (i = 1; i < after; i++)
 		vm_page_readahead_finish(ma[i]);
 	MPASS(vm_page_all_valid(m));
 	} else {
+		/* Unlike other grab functions zero is implied. */
 		vm_page_zero_invalid(m, TRUE);
 	}
 out:
-	if ((allocflags & VM_ALLOC_WIRED) != 0)
-		vm_page_wire(m);
-	if ((allocflags & VM_ALLOC_SBUSY) != 0 && vm_page_xbusied(m))
-		vm_page_busy_downgrade(m);
-	else if ((allocflags & VM_ALLOC_NOBUSY) != 0)
-		vm_page_busy_release(m);
+	vm_page_grab_release(m, allocflags);
 	*mp = m;
 	return (VM_PAGER_OK);
 }
 /*
  * Locklessly grab a valid page. If the page is not valid or not yet
  * allocated this will fall back to the object lock method.
  */
Show All 17 Lines

 vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
 	 * Attempt a lockless lookup and busy. We need at least an sbusy
 	 * before we can inspect the valid field and return a wired page.
 	 */
 	flags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
 	if (!vm_page_acquire_unlocked(object, pindex, NULL, mp, flags))
 		return (VM_PAGER_FAIL);
 	if ((m = *mp) != NULL) {
 		if (vm_page_all_valid(m)) {
-			if ((allocflags & VM_ALLOC_WIRED) != 0)
-				vm_page_wire(m);
 			vm_page_grab_release(m, allocflags);
 			return (VM_PAGER_OK);
 		}
 		vm_page_busy_release(m);
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0) {
 		*mp = NULL;
 		return (VM_PAGER_FAIL);
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines

 retrylookup:
 	m = vm_radix_lookup_le(&object->rtree, pindex + i);
 	if (m == NULL || m->pindex != pindex + i) {
 		mpred = m;
 		m = NULL;
 	} else
 		mpred = TAILQ_PREV(m, pglist, listq);
 	for (; i < count; i++) {
 		if (m != NULL) {
-			if (!vm_page_tryacquire(m, allocflags)) {
+			if (!vm_page_trybusy(m, allocflags)) {
 				if (vm_page_grab_sleep(object, m, pindex,
 				    "grbmaw", allocflags, true))
 					goto retrylookup;
 				break;
 			}
 		} else {
 			if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 				break;
 			m = vm_page_alloc_after(object, pindex + i,
 			    pflags | VM_ALLOC_COUNT(count - i), mpred);
 			if (m == NULL) {
 				if ((allocflags & (VM_ALLOC_NOWAIT |
 				    VM_ALLOC_WAITFAIL)) != 0)
 					break;
 				goto retrylookup;
 			}
 		}
-		if (vm_page_none_valid(m) &&
-		    (allocflags & VM_ALLOC_ZERO) != 0) {
-			if ((m->flags & PG_ZERO) == 0)
-				pmap_zero_page(m);
-			vm_page_valid(m);
-		}
 		vm_page_grab_release(m, allocflags);
 		ma[i] = mpred = m;
 		m = vm_page_next(m);
 	}
 	return (i);
 }
 /*
  * Unlocked variant of vm_page_grab_pages(). This accepts the same flags
  * and will fall back to the locked variant to handle allocation.
  */
 int
 vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
     int allocflags, vm_page_t *ma, int count)
 {
 	vm_page_t m, pred;
-	int flags;
 	int i;
 
 	vm_page_grab_check(allocflags);
 
-	/*
-	 * Modify flags for lockless acquire to hold the page until we
-	 * set it valid if necessary.
-	 */
-	flags = allocflags & ~VM_ALLOC_NOBUSY;
 	pred = NULL;
 	for (i = 0; i < count; i++, pindex++) {
-		if (!vm_page_acquire_unlocked(object, pindex, pred, &m, flags))
+		if (!vm_page_acquire_unlocked(object, pindex, pred, &m,
+		    allocflags))
 			return (i);
 		if (m == NULL)
 			break;
-		if ((flags & VM_ALLOC_ZERO) != 0 && vm_page_none_valid(m)) {
-			if ((m->flags & PG_ZERO) == 0)
-				pmap_zero_page(m);
-			vm_page_valid(m);
-		}
 		/* m will still be wired or busy according to flags. */
-		vm_page_grab_release(m, allocflags);
 		pred = ma[i] = m;
 	}
 	if ((allocflags & VM_ALLOC_NOCREAT) != 0)
 		return (i);
 	count -= i;
 	VM_OBJECT_WLOCK(object);
 	i += vm_page_grab_pages(object, pindex, allocflags, &ma[i], count);
 	VM_OBJECT_WUNLOCK(object);
▲ Show 20 Lines • Show All 421 Lines • ▼ Show 20 Lines
  * into memory and the file's size is not page aligned.
  */
 void
 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 {
 	int b;
 	int i;
 
+	/* Short circuit for all invalid. */
+	if (vm_page_none_valid(m)) {
+		pmap_zero_page(m);
+		goto out;
+	}
+
 	/*
 	 * Scan the valid bits looking for invalid sections that
 	 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the
 	 * valid bit may be set ) have already been zeroed by
 	 * vm_page_set_validclean().
 	 */
 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
 		    (m->valid & ((vm_page_bits_t)1 << i))) {
 			if (i > b) {
 				pmap_zero_page_area(m,
 				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 			}
 			b = i + 1;
 		}
 	}
 
+out:
 	/*
 	 * setvalid is TRUE when we can safely set the zero'd areas
 	 * as being valid. We can do this if there are no cache consistency
 	 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
 	 */
 	if (setvalid)
 		vm_page_valid(m);
 }
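
The scan keeps one valid bit per DEV_BSIZE chunk and flushes a pending run of invalid chunks whenever it hits a valid bit, with the i == PAGE_SIZE / DEV_BSIZE iteration acting as a sentinel that flushes a trailing run. A runnable model that prints the byte ranges the loop would zero, assuming 4 KB pages and 512-byte blocks; zero_invalid_ranges() is an invented name for illustration.

#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096
#define	DEV_BSIZE	512
#define	DEV_BSHIFT	9

typedef uint8_t vm_page_bits_t;	/* one bit per 512-byte chunk */

/*
 * Model of the vm_page_zero_invalid() scan: find maximal runs of
 * invalid chunks and report the byte range each run would zero.
 */
static void
zero_invalid_ranges(vm_page_bits_t valid)
{
	int b, i;

	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (valid & ((vm_page_bits_t)1 << i))) {
			if (i > b)
				printf("zero [%d, %d)\n",
				    b << DEV_BSHIFT, i << DEV_BSHIFT);
			b = i + 1;
		}
	}
}

int
main(void)
{
	/* Chunks 0-3 valid, 4-7 invalid. */
	zero_invalid_ranges(0x0F);
	return (0);
}

This prints zero [2048, 4096): the single run of invalid chunks at the end of the page, flushed by the sentinel iteration.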
▲ Show 20 Lines • Show All 223 Lines • Show Last 20 Lines