sys/vm/vm_page.c
[… 1,300 lines elided …]
* Unbusy and handle the page queueing for a page from a getpages request that | * Unbusy and handle the page queueing for a page from a getpages request that | ||||
* was optionally read ahead or behind. | * was optionally read ahead or behind. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_readahead_finish(vm_page_t m) | vm_page_readahead_finish(vm_page_t m) | ||||
{ | { | ||||
/* We shouldn't put invalid pages on queues. */ | /* We shouldn't put invalid pages on queues. */ | ||||
KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m)); | KASSERT(!vm_page_none_valid(m), ("%s: %p is invalid", __func__, m)); | ||||
/* | /* | ||||
* Since the page is not the one actually needed, whether it should | * Since the page is not the one actually needed, whether it should | ||||
* be activated or deactivated is not obvious. Empirical results | * be activated or deactivated is not obvious. Empirical results | ||||
* have shown that deactivating the page is usually the best choice, | * have shown that deactivating the page is usually the best choice, | ||||
* unless the page is wanted by another thread. | * unless the page is wanted by another thread. | ||||
*/ | */ | ||||
vm_page_lock(m); | vm_page_lock(m); | ||||
[… 83 lines elided …]
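For context, the elided region begins with the tail of vm_page_readahead_finish(), which implements the policy just described. A minimal sketch, under the assumption that the busy-waiters check of this vintage is what follows (the exact unbusy variant may differ in the real file):

    if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
        vm_page_activate(m);    /* another thread wants the page */
    else
        vm_page_deactivate(m);  /* empirically the better default */
    vm_page_unlock(m);
    vm_page_xunbusy(m);         /* drop the getpages busy reference */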
* | * | ||||
* This function should only be called by vm_page_dirty(). | * This function should only be called by vm_page_dirty(). | ||||
*/ | */ | ||||
void | void | ||||
vm_page_dirty_KBI(vm_page_t m) | vm_page_dirty_KBI(vm_page_t m) | ||||
{ | { | ||||
/* Refer to this operation by its public name. */ | /* Refer to this operation by its public name. */ | ||||
KASSERT(m->valid == VM_PAGE_BITS_ALL, | KASSERT(vm_page_all_valid(m), ("vm_page_dirty: page is invalid!")); | ||||
("vm_page_dirty: page is invalid!")); | |||||
m->dirty = VM_PAGE_BITS_ALL; | m->dirty = VM_PAGE_BITS_ALL; | ||||
} | } | ||||
/* | /* | ||||
* vm_page_insert: [ internal use only ] | * vm_page_insert: [ internal use only ] | ||||
* | * | ||||
* Inserts the given mem entry into the object and object list. | * Inserts the given mem entry into the object and object list. | ||||
* | * | ||||
[… 1,204 lines elided …] | else if ((object = m->object) != NULL) {
!vm_page_wired(m) && vm_page_tryxbusy(m) != 0) { | !vm_page_wired(m) && vm_page_tryxbusy(m) != 0) { | ||||
KASSERT(pmap_page_get_memattr(m) == | KASSERT(pmap_page_get_memattr(m) == | ||||
VM_MEMATTR_DEFAULT, | VM_MEMATTR_DEFAULT, | ||||
("page %p has an unexpected memattr", m)); | ("page %p has an unexpected memattr", m)); | ||||
KASSERT((m->oflags & (VPO_SWAPINPROG | | KASSERT((m->oflags & (VPO_SWAPINPROG | | ||||
VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, | VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0, | ||||
("page %p has unexpected oflags", m)); | ("page %p has unexpected oflags", m)); | ||||
/* Don't care: VPO_NOSYNC. */ | /* Don't care: VPO_NOSYNC. */ | ||||
if (m->valid != 0) { | if (!vm_page_none_valid(m)) { | ||||
/* | /* | ||||
* First, try to allocate a new page | * First, try to allocate a new page | ||||
* that is above "high". Failing | * that is above "high". Failing | ||||
* that, try to allocate a new page | * that, try to allocate a new page | ||||
* that is below "m_run". Allocate | * that is below "m_run". Allocate | ||||
* the new page between the end of | * the new page between the end of | ||||
* "m_run" and "high" only as a last | * "m_run" and "high" only as a last | ||||
* resort. | * resort. | ||||
[… 1,612 lines elided …] | if ((m = vm_page_lookup(object, pindex)) != NULL) {
* If the page is fully valid it can only become invalid | * If the page is fully valid it can only become invalid | ||||
* with the object lock held. If it is not valid it can | * with the object lock held. If it is not valid it can | ||||
* become valid with the busy lock held. Therefore, we | * become valid with the busy lock held. Therefore, we | ||||
* may unnecessarily acquire the exclusive busy lock here if we | * may unnecessarily acquire the exclusive busy lock here if we | ||||
* race with I/O completion not using the object lock. | * race with I/O completion not using the object lock. | ||||
* However, we will not end up with an invalid page and a | * However, we will not end up with an invalid page and a | ||||
* shared lock. | * shared lock. | ||||
*/ | */ | ||||
if (m->valid != VM_PAGE_BITS_ALL || | if (!vm_page_all_valid(m) || | ||||
(allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0) { | (allocflags & (VM_ALLOC_IGN_SBUSY | VM_ALLOC_SBUSY)) == 0) { | ||||
sleep = !vm_page_tryxbusy(m); | sleep = !vm_page_tryxbusy(m); | ||||
xbusy = true; | xbusy = true; | ||||
} else | } else | ||||
sleep = !vm_page_trysbusy(m); | sleep = !vm_page_trysbusy(m); | ||||
if (sleep) { | if (sleep) { | ||||
/* | /* | ||||
* Reference the page before unlocking and | * Reference the page before unlocking and | ||||
* sleeping so that the page daemon is less | * sleeping so that the page daemon is less | ||||
* likely to reclaim it. | * likely to reclaim it. | ||||
*/ | */ | ||||
if ((allocflags & VM_ALLOC_NOCREAT) == 0) | if ((allocflags & VM_ALLOC_NOCREAT) == 0) | ||||
vm_page_aflag_set(m, PGA_REFERENCED); | vm_page_aflag_set(m, PGA_REFERENCED); | ||||
vm_page_busy_sleep(m, "pgrbwt", (allocflags & | vm_page_busy_sleep(m, "pgrbwt", (allocflags & | ||||
VM_ALLOC_IGN_SBUSY) != 0); | VM_ALLOC_IGN_SBUSY) != 0); | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
goto retrylookup; | goto retrylookup; | ||||
} | } | ||||
if ((allocflags & VM_ALLOC_NOCREAT) != 0 && | if ((allocflags & VM_ALLOC_NOCREAT) != 0 && | ||||
m->valid != VM_PAGE_BITS_ALL) { | !vm_page_all_valid(m)) { | ||||
if (xbusy) | if (xbusy) | ||||
vm_page_xunbusy(m); | vm_page_xunbusy(m); | ||||
else | else | ||||
vm_page_sunbusy(m); | vm_page_sunbusy(m); | ||||
*mp = NULL; | *mp = NULL; | ||||
return (VM_PAGER_FAIL); | return (VM_PAGER_FAIL); | ||||
} | } | ||||
if ((allocflags & VM_ALLOC_WIRED) != 0) | if ((allocflags & VM_ALLOC_WIRED) != 0) | ||||
vm_page_wire(m); | vm_page_wire(m); | ||||
if (m->valid == VM_PAGE_BITS_ALL) | if (vm_page_all_valid(m)) | ||||
goto out; | goto out; | ||||
} else if ((allocflags & VM_ALLOC_NOCREAT) != 0) { | } else if ((allocflags & VM_ALLOC_NOCREAT) != 0) { | ||||
*mp = NULL; | *mp = NULL; | ||||
return (VM_PAGER_FAIL); | return (VM_PAGER_FAIL); | ||||
} else if ((m = vm_page_alloc(object, pindex, pflags)) != NULL) { | } else if ((m = vm_page_alloc(object, pindex, pflags)) != NULL) { | ||||
xbusy = true; | xbusy = true; | ||||
} else { | } else { | ||||
goto retrylookup; | goto retrylookup; | ||||
} | } | ||||
vm_page_assert_xbusied(m); | vm_page_assert_xbusied(m); | ||||
MPASS(xbusy); | MPASS(xbusy); | ||||
if (vm_pager_has_page(object, pindex, NULL, NULL)) { | if (vm_pager_has_page(object, pindex, NULL, NULL)) { | ||||
rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); | rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); | ||||
if (rv != VM_PAGER_OK) { | if (rv != VM_PAGER_OK) { | ||||
if (allocflags & VM_ALLOC_WIRED) | if (allocflags & VM_ALLOC_WIRED) | ||||
vm_page_unwire_noq(m); | vm_page_unwire_noq(m); | ||||
vm_page_free(m); | vm_page_free(m); | ||||
*mp = NULL; | *mp = NULL; | ||||
return (rv); | return (rv); | ||||
} | } | ||||
MPASS(m->valid == VM_PAGE_BITS_ALL); | MPASS(vm_page_all_valid(m)); | ||||
} else { | } else { | ||||
vm_page_zero_invalid(m, TRUE); | vm_page_zero_invalid(m, TRUE); | ||||
} | } | ||||
out: | out: | ||||
if ((allocflags & VM_ALLOC_NOBUSY) != 0) { | if ((allocflags & VM_ALLOC_NOBUSY) != 0) { | ||||
if (xbusy) | if (xbusy) | ||||
vm_page_xunbusy(m); | vm_page_xunbusy(m); | ||||
else | else | ||||
[… 96 lines elided …] | if (m != NULL) {
m = vm_page_alloc_after(object, pindex + i, | m = vm_page_alloc_after(object, pindex + i, | ||||
pflags | VM_ALLOC_COUNT(count - i), mpred); | pflags | VM_ALLOC_COUNT(count - i), mpred); | ||||
if (m == NULL) { | if (m == NULL) { | ||||
if ((allocflags & VM_ALLOC_NOWAIT) != 0) | if ((allocflags & VM_ALLOC_NOWAIT) != 0) | ||||
break; | break; | ||||
goto retrylookup; | goto retrylookup; | ||||
} | } | ||||
} | } | ||||
if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) { | if (vm_page_none_valid(m) && | ||||
(allocflags & VM_ALLOC_ZERO) != 0) { | |||||
if ((m->flags & PG_ZERO) == 0) | if ((m->flags & PG_ZERO) == 0) | ||||
pmap_zero_page(m); | pmap_zero_page(m); | ||||
m->valid = VM_PAGE_BITS_ALL; | vm_page_valid(m); | ||||
} | } | ||||
if ((allocflags & VM_ALLOC_NOBUSY) != 0) { | if ((allocflags & VM_ALLOC_NOBUSY) != 0) { | ||||
if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0) | if ((allocflags & VM_ALLOC_IGN_SBUSY) != 0) | ||||
vm_page_sunbusy(m); | vm_page_sunbusy(m); | ||||
else | else | ||||
vm_page_xunbusy(m); | vm_page_xunbusy(m); | ||||
} | } | ||||
ma[i] = mpred = m; | ma[i] = mpred = m; | ||||
[… 23 lines elided …] | vm_page_bits(int base, int size)
first_bit = base >> DEV_BSHIFT; | first_bit = base >> DEV_BSHIFT; | ||||
last_bit = (base + size - 1) >> DEV_BSHIFT; | last_bit = (base + size - 1) >> DEV_BSHIFT; | ||||
return (((vm_page_bits_t)2 << last_bit) - | return (((vm_page_bits_t)2 << last_bit) - | ||||
((vm_page_bits_t)1 << first_bit)); | ((vm_page_bits_t)1 << first_bit)); | ||||
} | } | ||||
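A quick worked example of the mask arithmetic above, runnable in userspace. The 8-bit map, DEV_BSHIFT value, and helper name are assumptions for illustration (a hypothetical 4 KB page with 512-byte disk blocks), not part of the diff:

    #include <assert.h>
    #include <stdint.h>

    #define DEV_BSHIFT  9               /* log2(512), the disk block size */
    typedef uint8_t page_bits_t;        /* 8 blocks per 4 KB page */

    /* Mirrors vm_page_bits(): sets bits first_bit..last_bit inclusive. */
    static page_bits_t
    page_bits(int base, int size)
    {
        int first_bit = base >> DEV_BSHIFT;
        int last_bit = (base + size - 1) >> DEV_BSHIFT;

        /* (2 << last) - (1 << first) == all bits in [first, last] set. */
        return (((page_bits_t)2 << last_bit) -
            ((page_bits_t)1 << first_bit));
    }

    int
    main(void)
    {
        /* Bytes 512..1535 span blocks 1 and 2: mask 0b00000110. */
        assert(page_bits(512, 1024) == 0x06);
        /* Unaligned ranges round out to whole blocks: block 0 only. */
        assert(page_bits(100, 200) == 0x01);
        return (0);
    }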
static inline void | |||||
vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set) | |||||
{ | |||||
#if PAGE_SIZE == 32768 | |||||
atomic_set_64((uint64_t *)bits, set); | |||||
#elif PAGE_SIZE == 16384 | |||||
atomic_set_32((uint32_t *)bits, set); | |||||
#elif (PAGE_SIZE == 8192) && defined(atomic_set_16) | |||||
atomic_set_16((uint16_t *)bits, set); | |||||
#elif (PAGE_SIZE == 4096) && defined(atomic_set_8) | |||||
atomic_set_8((uint8_t *)bits, set); | |||||
#else /* PAGE_SIZE <= 8192 */ | |||||
uintptr_t addr; | |||||
int shift; | |||||
addr = (uintptr_t)bits; | |||||
/* | /* | ||||
* Use a trick to perform a 32-bit atomic on the | |||||
* containing aligned word, to not depend on the existence | |||||
* of atomic_{set, clear}_{8, 16}. | |||||
*/ | |||||
shift = addr & (sizeof(uint32_t) - 1); | |||||
#if BYTE_ORDER == BIG_ENDIAN | |||||
shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; | |||||
#else | |||||
shift *= NBBY; | |||||
#endif | |||||
addr &= ~(sizeof(uint32_t) - 1); | |||||
atomic_set_32((uint32_t *)addr, set << shift); |||||
#endif /* PAGE_SIZE */ | |||||
} | |||||
static inline void | |||||
vm_page_bits_clear(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t clear) | |||||
{ | |||||
#if PAGE_SIZE == 32768 | |||||
atomic_clear_64((uint64_t *)bits, clear); | |||||
#elif PAGE_SIZE == 16384 | |||||
atomic_clear_32((uint32_t *)bits, clear); | |||||
#elif (PAGE_SIZE == 8192) && defined(atomic_clear_16) | |||||
atomic_clear_16((uint16_t *)bits, clear); | |||||
#elif (PAGE_SIZE == 4096) && defined(atomic_clear_8) | |||||
atomic_clear_8((uint8_t *)bits, clear); | |||||
#else /* PAGE_SIZE <= 8192 */ | |||||
uintptr_t addr; | |||||
int shift; | |||||
addr = (uintptr_t)bits; | |||||
/* | |||||
* Use a trick to perform a 32-bit atomic on the | |||||
* containing aligned word, to not depend on the existence | |||||
* of atomic_{set, clear}_{8, 16}. | |||||
*/ | |||||
shift = addr & (sizeof(uint32_t) - 1); | |||||
#if BYTE_ORDER == BIG_ENDIAN | |||||
shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY; | |||||
#else | |||||
shift *= NBBY; | |||||
#endif | |||||
addr &= ~(sizeof(uint32_t) - 1); | |||||
atomic_clear_32((uint32_t *)addr, clear << shift); |||||
#endif /* PAGE_SIZE */ | |||||
} | |||||
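To illustrate the containing-word trick used by both helpers above, here is a standalone sketch. It assumes the GCC/Clang __atomic builtins in place of the kernel's atomic_set_32(), handles only the little-endian case for brevity (the kernel version also adjusts the shift for big-endian), and all names are hypothetical:

    #include <assert.h>
    #include <stdint.h>

    #define NBBY    8   /* bits per byte */

    /*
     * Atomically OR 'set' into the uint8_t at 'bits' by operating on
     * the aligned 32-bit word that contains it, so no 8-bit atomic
     * primitive is required.
     */
    static void
    bits_set8(uint8_t *bits, uint8_t set)
    {
        uintptr_t addr = (uintptr_t)bits;
        int shift = (addr & (sizeof(uint32_t) - 1)) * NBBY; /* LE only */

        addr &= ~(uintptr_t)(sizeof(uint32_t) - 1); /* round down to word */
        __atomic_fetch_or((uint32_t *)addr, (uint32_t)set << shift,
            __ATOMIC_SEQ_CST);
    }

    int
    main(void)
    {
        /* Four byte-sized bitmaps packed into one aligned word. */
        _Alignas(uint32_t) uint8_t maps[4] = { 0, 0, 0, 0 };

        bits_set8(&maps[2], 0x06);  /* must touch only byte 2 */
        assert(maps[0] == 0 && maps[1] == 0 &&
            maps[2] == 0x06 && maps[3] == 0);
        return (0);
    }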
/* | |||||
* vm_page_set_valid_range: | * vm_page_set_valid_range: | ||||
* | * | ||||
* Sets portions of a page valid. The arguments are expected | * Sets portions of a page valid. The arguments are expected | ||||
* to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | ||||
* of any partial chunks touched by the range. The invalid portion of | * of any partial chunks touched by the range. The invalid portion of | ||||
* such chunks will be zeroed. | * such chunks will be zeroed. | ||||
* | * | ||||
* (base + size) must be less than or equal to PAGE_SIZE. | * (base + size) must be less than or equal to PAGE_SIZE. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_set_valid_range(vm_page_t m, int base, int size) | vm_page_set_valid_range(vm_page_t m, int base, int size) | ||||
{ | { | ||||
int endoff, frag; | int endoff, frag; | ||||
vm_page_bits_t pagebits; | |||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | vm_page_assert_busied(m); | ||||
if (size == 0) /* handle degenerate case */ | if (size == 0) /* handle degenerate case */ | ||||
return; | return; | ||||
/* | /* | ||||
* If the base is not DEV_BSIZE aligned and the valid | * If the base is not DEV_BSIZE aligned and the valid | ||||
* bit is clear, we have to zero out a portion of the | * bit is clear, we have to zero out a portion of the | ||||
* first block. | * first block. | ||||
*/ | */ | ||||
[… 17 lines elided …] | vm_page_set_valid_range(vm_page_t m, int base, int size)
* is already dirty. | * is already dirty. | ||||
*/ | */ | ||||
KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, | KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0, | ||||
("vm_page_set_valid_range: page %p is dirty", m)); | ("vm_page_set_valid_range: page %p is dirty", m)); | ||||
/* | /* | ||||
* Set valid bits inclusive of any overlap. | * Set valid bits inclusive of any overlap. | ||||
*/ | */ | ||||
m->valid |= vm_page_bits(base, size); | pagebits = vm_page_bits(base, size); | ||||
if (vm_page_xbusied(m)) | |||||
m->valid |= pagebits; | |||||
else | |||||
vm_page_bits_set(m, &m->valid, pagebits); | |||||
} | } | ||||
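A hedged usage sketch of what the busy-state split above enables: an I/O completion path holding only a shared busy reference may still mark the blocks it filled valid, since the helper falls back to the atomic vm_page_bits_set() path. The handler itself is hypothetical; only the vm_page_* calls come from this diff:

    /*
     * Hypothetical completion handler: the first two 512-byte blocks
     * of page m were just read in.  m is sbusied, not xbusied, so
     * vm_page_set_valid_range() takes the atomic path.
     */
    static void
    io_done(vm_page_t m)
    {
        vm_page_set_valid_range(m, 0, 2 * DEV_BSIZE);
        vm_page_sunbusy(m);     /* drop the shared busy reference */
    }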
/* | /* | ||||
* Clear the given bits from the specified page's dirty field. | * Clear the given bits from the specified page's dirty field. | ||||
*/ | */ | ||||
static __inline void | static __inline void | ||||
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) | vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) | ||||
{ | { | ||||
uintptr_t addr; | |||||
#if PAGE_SIZE < 16384 | |||||
int shift; | |||||
#endif | |||||
vm_page_assert_busied(m); | |||||
/* | /* | ||||
* If the object is locked and the page is neither exclusive busy nor | * If the page is xbusied and not write mapped, we are the | ||||
* write mapped, then the page's dirty field cannot possibly be | * only thread that can modify dirty bits. Otherwise, the pmap | ||||
* set by a concurrent pmap operation. | * layer can call vm_page_dirty() without holding a distinguished | ||||
* lock. The combination of page busy and atomic operations | ||||
* suffices to guarantee consistency of the page dirty field. | ||||
*/ | */ | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) | ||||
if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) | |||||
m->dirty &= ~pagebits; | m->dirty &= ~pagebits; | ||||
else { | else | ||||
/* | vm_page_bits_clear(m, &m->dirty, pagebits); | ||||
* The pmap layer can call vm_page_dirty() without | |||||
* holding a distinguished lock. The combination of | |||||
* the object's lock and an atomic operation suffice | |||||
* to guarantee consistency of the page dirty field. | |||||
* | |||||
* For PAGE_SIZE == 32768 case, compiler already | |||||
* properly aligns the dirty field, so no forcible | |||||
* alignment is needed. Only require existence of | |||||
* atomic_clear_64 when page size is 32768. | |||||
*/ | |||||
addr = (uintptr_t)&m->dirty; | |||||
#if PAGE_SIZE == 32768 | |||||
atomic_clear_64((uint64_t *)addr, pagebits); | |||||
#elif PAGE_SIZE == 16384 | |||||
atomic_clear_32((uint32_t *)addr, pagebits); | |||||
#else /* PAGE_SIZE <= 8192 */ | |||||
/* | |||||
* Use a trick to perform a 32-bit atomic on the | |||||
* containing aligned word, to not depend on the existence | |||||
* of atomic_clear_{8, 16}. | |||||
*/ | |||||
shift = addr & (sizeof(uint32_t) - 1); | |||||
#if BYTE_ORDER == BIG_ENDIAN | |||||
shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY; | |||||
#else | |||||
shift *= NBBY; | |||||
#endif | |||||
addr &= ~(sizeof(uint32_t) - 1); | |||||
atomic_clear_32((uint32_t *)addr, pagebits << shift); | |||||
#endif /* PAGE_SIZE */ | |||||
} | } | ||||
} | |||||
/* | /* | ||||
* vm_page_set_validclean: | * vm_page_set_validclean: | ||||
* | * | ||||
* Sets portions of a page valid and clean. The arguments are expected | * Sets portions of a page valid and clean. The arguments are expected | ||||
* to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | ||||
* of any partial chunks touched by the range. The invalid portion of | * of any partial chunks touched by the range. The invalid portion of | ||||
* such chunks will be zero'd. | * such chunks will be zero'd. | ||||
* | * | ||||
* (base + size) must be less than or equal to PAGE_SIZE. | * (base + size) must be less than or equal to PAGE_SIZE. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_set_validclean(vm_page_t m, int base, int size) | vm_page_set_validclean(vm_page_t m, int base, int size) | ||||
{ | { | ||||
vm_page_bits_t oldvalid, pagebits; | vm_page_bits_t oldvalid, pagebits; | ||||
int endoff, frag; | int endoff, frag; | ||||
/* Object lock for VPO_NOSYNC */ | |||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | VM_OBJECT_ASSERT_WLOCKED(m->object); | ||||
vm_page_assert_busied(m); | |||||
if (size == 0) /* handle degenerate case */ | if (size == 0) /* handle degenerate case */ | ||||
return; | return; | ||||
/* | /* | ||||
* If the base is not DEV_BSIZE aligned and the valid | * If the base is not DEV_BSIZE aligned and the valid | ||||
* bit is clear, we have to zero out a portion of the | * bit is clear, we have to zero out a portion of the | ||||
* first block. | * first block. | ||||
*/ | */ | ||||
Show All 20 Lines | vm_page_set_validclean(vm_page_t m, int base, int size) | ||||
* be set again. | * be set again. | ||||
* | * | ||||
* We set valid bits inclusive of any overlap, but we can only | * We set valid bits inclusive of any overlap, but we can only | ||||
* clear dirty bits for DEV_BSIZE chunks that are fully within | * clear dirty bits for DEV_BSIZE chunks that are fully within | ||||
* the range. | * the range. | ||||
*/ | */ | ||||
oldvalid = m->valid; | oldvalid = m->valid; | ||||
pagebits = vm_page_bits(base, size); | pagebits = vm_page_bits(base, size); | ||||
if (vm_page_xbusied(m)) | |||||
m->valid |= pagebits; | m->valid |= pagebits; | ||||
else | |||||
vm_page_bits_set(m, &m->valid, pagebits); | |||||
#if 0 /* NOT YET */ | #if 0 /* NOT YET */ | ||||
if ((frag = base & (DEV_BSIZE - 1)) != 0) { | if ((frag = base & (DEV_BSIZE - 1)) != 0) { | ||||
frag = DEV_BSIZE - frag; | frag = DEV_BSIZE - frag; | ||||
base += frag; | base += frag; | ||||
size -= frag; | size -= frag; | ||||
if (size < 0) | if (size < 0) | ||||
size = 0; | size = 0; | ||||
} | } | ||||
[… 12 lines elided …] | if (oldvalid == VM_PAGE_BITS_ALL)
* pmap_protect(), could clear a modification in the | * pmap_protect(), could clear a modification in the | ||||
* pmap and set the dirty field on the page before | * pmap and set the dirty field on the page before | ||||
* pmap_clear_modify() had begun and after the dirty | * pmap_clear_modify() had begun and after the dirty | ||||
* field was cleared here. | * field was cleared here. | ||||
*/ | */ | ||||
pmap_clear_modify(m); | pmap_clear_modify(m); | ||||
m->dirty = 0; | m->dirty = 0; | ||||
m->oflags &= ~VPO_NOSYNC; | m->oflags &= ~VPO_NOSYNC; | ||||
} else if (oldvalid != VM_PAGE_BITS_ALL) | } else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m)) | ||||
m->dirty &= ~pagebits; | m->dirty &= ~pagebits; | ||||
else | else | ||||
vm_page_clear_dirty_mask(m, pagebits); | vm_page_clear_dirty_mask(m, pagebits); | ||||
} | } | ||||
void | void | ||||
vm_page_clear_dirty(vm_page_t m, int base, int size) | vm_page_clear_dirty(vm_page_t m, int base, int size) | ||||
{ | { | ||||
vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); | vm_page_clear_dirty_mask(m, vm_page_bits(base, size)); | ||||
} | } | ||||
/* | /* | ||||
* vm_page_set_invalid: | * vm_page_set_invalid: | ||||
* | * | ||||
* Invalidates DEV_BSIZE'd chunks within a page. Both the | * Invalidates DEV_BSIZE'd chunks within a page. Both the | ||||
* valid and dirty bits for the affected areas are cleared. | * valid and dirty bits for the affected areas are cleared. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_set_invalid(vm_page_t m, int base, int size) | vm_page_set_invalid(vm_page_t m, int base, int size) | ||||
{ | { | ||||
vm_page_bits_t bits; | vm_page_bits_t bits; | ||||
vm_object_t object; | vm_object_t object; | ||||
/* | |||||
* The object lock is required so that pages can't be mapped | |||||
* read-only while we're in the process of invalidating them. | |||||
*/ | |||||
object = m->object; | object = m->object; | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
vm_page_assert_busied(m); | |||||
if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + | if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) + | ||||
size >= object->un_pager.vnp.vnp_size) | size >= object->un_pager.vnp.vnp_size) | ||||
bits = VM_PAGE_BITS_ALL; | bits = VM_PAGE_BITS_ALL; | ||||
else | else | ||||
bits = vm_page_bits(base, size); | bits = vm_page_bits(base, size); | ||||
if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL && | if (object->ref_count != 0 && vm_page_all_valid(m) && bits != 0) | ||||
bits != 0) | |||||
pmap_remove_all(m); | pmap_remove_all(m); | ||||
KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) || | KASSERT((bits == 0 && vm_page_all_valid(m)) || | ||||
!pmap_page_is_mapped(m), | !pmap_page_is_mapped(m), | ||||
("vm_page_set_invalid: page %p is mapped", m)); | ("vm_page_set_invalid: page %p is mapped", m)); | ||||
if (vm_page_xbusied(m)) { | |||||
m->valid &= ~bits; | m->valid &= ~bits; | ||||
m->dirty &= ~bits; | m->dirty &= ~bits; | ||||
} else { | |||||
vm_page_bits_clear(m, &m->valid, bits); | |||||
vm_page_bits_clear(m, &m->dirty, bits); | |||||
} | } | ||||
} | |||||
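A worked example of the vnode special case above, with assumed numbers (not from the diff): vnp_size = 2560 bytes, PAGE_SIZE = 4096, and m at pindex 0, so IDX_TO_OFF(m->pindex) = 0. Invalidating base = 0, size = 2560 satisfies 0 + 2560 >= 2560, so bits becomes VM_PAGE_BITS_ALL rather than vm_page_bits(0, 2560) (blocks 0..4 only): the page holds the EOF tail, and no valid data may remain beyond the end of the file.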
/* | /* | ||||
* vm_page_invalid: | |||||
* | |||||
* Invalidates the entire page. The page must be busy, unmapped, and | |||||
* the enclosing object must be locked. The object lock protects | ||||
* against concurrent read-only pmap enter which is done without | |||||
* busy. | |||||
*/ | |||||
void | |||||
vm_page_invalid(vm_page_t m) | |||||
{ | |||||
vm_page_assert_busied(m); | |||||
VM_OBJECT_ASSERT_LOCKED(m->object); | |||||
MPASS(!pmap_page_is_mapped(m)); | |||||
if (vm_page_xbusied(m)) | |||||
m->valid = 0; | |||||
else | |||||
vm_page_bits_clear(m, &m->valid, VM_PAGE_BITS_ALL); | |||||
} | |||||
/* | |||||
* vm_page_zero_invalid() | * vm_page_zero_invalid() | ||||
* | * | ||||
* The kernel assumes that the invalid portions of a page contain | * The kernel assumes that the invalid portions of a page contain | ||||
* garbage, but such pages can be mapped into memory by user code. | * garbage, but such pages can be mapped into memory by user code. | ||||
* When this occurs, we must zero out the non-valid portions of the | * When this occurs, we must zero out the non-valid portions of the | ||||
* page so user code sees what it expects. | * page so user code sees what it expects. | ||||
* | * | ||||
* Pages are most often semi-valid when the end of a file is mapped | * Pages are most often semi-valid when the end of a file is mapped | ||||
* into memory and the file's size is not page aligned. | * into memory and the file's size is not page aligned. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) | vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) | ||||
{ | { | ||||
int b; | int b; | ||||
int i; | int i; | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | |||||
/* | /* | ||||
* Scan the valid bits looking for invalid sections that | * Scan the valid bits looking for invalid sections that | ||||
* must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the | * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the | ||||
* valid bit may be set) have already been zeroed by | * valid bit may be set) have already been zeroed by | ||||
* vm_page_set_validclean(). | * vm_page_set_validclean(). | ||||
*/ | */ | ||||
for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { | for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { | ||||
if (i == (PAGE_SIZE / DEV_BSIZE) || | if (i == (PAGE_SIZE / DEV_BSIZE) || | ||||
(m->valid & ((vm_page_bits_t)1 << i))) { | (m->valid & ((vm_page_bits_t)1 << i))) { | ||||
if (i > b) { | if (i > b) { | ||||
pmap_zero_page_area(m, | pmap_zero_page_area(m, | ||||
b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); | b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); | ||||
} | } | ||||
b = i + 1; | b = i + 1; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* setvalid is TRUE when we can safely set the zero'd areas | * setvalid is TRUE when we can safely set the zero'd areas | ||||
* as being valid. We can do this if there are no cache consistency | * as being valid. We can do this if there are no cache consistency | ||||
* issues. e.g. it is ok to do with UFS, but not ok to do with NFS. | * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. | ||||
*/ | */ | ||||
if (setvalid) | if (setvalid) | ||||
m->valid = VM_PAGE_BITS_ALL; | vm_page_valid(m); | ||||
} | } | ||||
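A userspace illustration of the run-scanning loop above, assuming an 8-bit valid map (4 KB page, 512-byte blocks) and a stub standing in for pmap_zero_page_area(); the names outside the loop are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define BLKS    8   /* PAGE_SIZE / DEV_BSIZE for a 4 KB page */

    /* Stub standing in for pmap_zero_page_area(). */
    static void
    zero_area(int off, int len)
    {
        printf("zero bytes %d..%d\n", off, off + len - 1);
    }

    int
    main(void)
    {
        uint8_t valid = 0x19;   /* blocks 0, 3, 4 valid: 0b00011001 */
        int b, i;

        /* Same scan as vm_page_zero_invalid(): emit each invalid run. */
        for (b = i = 0; i <= BLKS; ++i) {
            if (i == BLKS || (valid & (1u << i)) != 0) {
                if (i > b)
                    zero_area(b << 9, (i - b) << 9);
                b = i + 1;
            }
        }
        /* Prints runs 512..1535 (blocks 1-2) and 2560..4095 (blocks 5-7). */
        return (0);
    }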
/* | /* | ||||
* vm_page_is_valid: | * vm_page_is_valid: | ||||
* | * | ||||
* Is (partial) page valid? In the degenerate case size == 0, this | * Is (partial) page valid? In the degenerate case size == 0, this | ||||
* returns FALSE if the page is entirely invalid, and TRUE | * returns FALSE if the page is entirely invalid, and TRUE | ||||
* otherwise. | * otherwise. | ||||
* | ||||
* Some callers invoke this routine without the busy lock held and | ||||
* handle races via higher level locks. Typical callers should | ||||
* hold a busy lock to prevent invalidation. | ||||
*/ | */ | ||||
int | int | ||||
vm_page_is_valid(vm_page_t m, int base, int size) | vm_page_is_valid(vm_page_t m, int base, int size) | ||||
{ | { | ||||
vm_page_bits_t bits; | vm_page_bits_t bits; | ||||
VM_OBJECT_ASSERT_LOCKED(m->object); | |||||
bits = vm_page_bits(base, size); | bits = vm_page_bits(base, size); | ||||
return (m->valid != 0 && (m->valid & bits) == bits); | return (m->valid != 0 && (m->valid & bits) == bits); | ||||
} | } | ||||
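Continuing the 8-bit illustration from vm_page_bits() above, with an assumed valid map (values are hypothetical): valid = 0b00011001 (blocks 0, 3, 4). Querying base = 1536, size = 1024 covers blocks 3..4, so bits = 0b00011000 and (valid & bits) == bits: the range is reported valid. Querying base = 0, size = 1024 gives bits = 0b00000011, and (valid & bits) == 0b00000001 != bits: not fully valid.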
/* | /* | ||||
* Returns true if all of the specified predicates are true for the entire | * Returns true if all of the specified predicates are true for the entire | ||||
* (super)page and false otherwise. | * (super)page and false otherwise. | ||||
*/ | */ | ||||
[… 41 lines elided …]
/* | /* | ||||
* Set the page's dirty bits if the page is modified. | * Set the page's dirty bits if the page is modified. | ||||
*/ | */ | ||||
void | void | ||||
vm_page_test_dirty(vm_page_t m) | vm_page_test_dirty(vm_page_t m) | ||||
{ | { | ||||
VM_OBJECT_ASSERT_WLOCKED(m->object); | vm_page_assert_busied(m); | ||||
if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) | if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m)) | ||||
vm_page_dirty(m); | vm_page_dirty(m); | ||||
} | |||||
void | |||||
vm_page_valid(vm_page_t m) | |||||
{ | |||||
vm_page_assert_busied(m); | |||||
if (vm_page_xbusied(m)) | |||||
m->valid = VM_PAGE_BITS_ALL; | |||||
else | |||||
vm_page_bits_set(m, &m->valid, VM_PAGE_BITS_ALL); | |||||
} | } | ||||
void | void | ||||
vm_page_lock_KBI(vm_page_t m, const char *file, int line) | vm_page_lock_KBI(vm_page_t m, const char *file, int line) | ||||
{ | { | ||||
mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); | mtx_lock_flags_(vm_page_lockptr(m), 0, file, line); | ||||
} | } | ||||
[… 127 lines elided …]