Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/vm_page.c
Show First 20 Lines • Show All 1,578 Lines • ▼ Show 20 Lines | vm_page_object_remove(vm_page_t m) | ||||
vm_object_t object; | vm_object_t object; | ||||
vm_page_t mrem; | vm_page_t mrem; | ||||
object = m->object; | object = m->object; | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
KASSERT((m->ref_count & VPRC_OBJREF) != 0, | KASSERT((m->ref_count & VPRC_OBJREF) != 0, | ||||
("page %p is missing its object ref", m)); | ("page %p is missing its object ref", m)); | ||||
/* Deferred free of swap space. */ | |||||
if ((m->a.flags & PGA_SWAP_FREE) != 0) | |||||
vm_pager_page_unswapped(m); | |||||
mrem = vm_radix_remove(&object->rtree, m->pindex); | mrem = vm_radix_remove(&object->rtree, m->pindex); | ||||
KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m)); | KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m)); | ||||
/* | /* | ||||
* Now remove from the object's list of backed pages. | * Now remove from the object's list of backed pages. | ||||
*/ | */ | ||||
TAILQ_REMOVE(&object->memq, m, listq); | TAILQ_REMOVE(&object->memq, m, listq); | ||||
▲ Show 20 Lines • Show All 3,033 Lines • ▼ Show 20 Lines | |||||
#else | #else | ||||
shift *= NBBY; | shift *= NBBY; | ||||
#endif | #endif | ||||
addr &= ~(sizeof(uint32_t) - 1); | addr &= ~(sizeof(uint32_t) - 1); | ||||
atomic_clear_32((uint32_t *)addr, clear << shift); | atomic_clear_32((uint32_t *)addr, clear << shift); | ||||
#endif /* PAGE_SIZE */ | #endif /* PAGE_SIZE */ | ||||
} | } | ||||
/*
 *	vm_page_bits_swap:
 *
 *	Atomically replace the vm_page_bits_t bitmap at "*bits" (a page's
 *	valid or dirty field) with "newbits" and return the previous
 *	contents.  The swap is lock-free: each path retries an
 *	atomic_fcmpset of the appropriate width until it succeeds.
 *
 *	"m" is not referenced here; it is accepted for symmetry with the
 *	other vm_page_bits_*() helpers.
 */
static inline vm_page_bits_t
vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
{
#if PAGE_SIZE == 32768
	uint64_t old;

	old = *bits;
	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
	return (old);
#elif PAGE_SIZE == 16384
	uint32_t old;

	old = *bits;
	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
	uint16_t old;

	old = *bits;
	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
	uint8_t old;

	old = *bits;
	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
	return (old);
#else		/* PAGE_SIZE <= 4096*/
	uintptr_t addr;
	uint32_t old, new, mask;
	int shift;

	addr = (uintptr_t)bits;
	/*
	 * Use a trick to perform a 32-bit atomic on the
	 * containing aligned word, to not depend on the existence
	 * of atomic_{set, swap, clear}_{8, 16}.
	 */
	/* Byte offset of the bitmap within its aligned 32-bit word. */
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	/* On big-endian, lower addresses hold the more significant bytes. */
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	/* Round down to the containing aligned 32-bit word. */
	addr &= ~(sizeof(uint32_t) - 1);
	mask = VM_PAGE_BITS_ALL << shift;
	/*
	 * Seed "old" with the narrow value; the first fcmpset will fail if
	 * neighboring bytes in the word are non-zero and reload the real
	 * word contents, after which the loop converges normally.
	 */
	old = *bits;
	do {
		/* Preserve the neighboring bytes, replace only our field. */
		new = old & ~mask;
		new |= newbits << shift;
	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
	/* Shift our field back down; the uint32_t -> vm_page_bits_t
	 * conversion discards the neighboring bytes. */
	return (old >> shift);
#endif		/* PAGE_SIZE */
}
/* | |||||
* vm_page_set_valid_range: | * vm_page_set_valid_range: | ||||
* | * | ||||
* Sets portions of a page valid. The arguments are expected | * Sets portions of a page valid. The arguments are expected | ||||
* to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive | ||||
* of any partial chunks touched by the range. The invalid portion of | * of any partial chunks touched by the range. The invalid portion of | ||||
* such chunks will be zeroed. | * such chunks will be zeroed. | ||||
* | * | ||||
* (base + size) must be less then or equal to PAGE_SIZE. | * (base + size) must be less then or equal to PAGE_SIZE. | ||||
Show All 38 Lines | vm_page_set_valid_range(vm_page_t m, int base, int size) | ||||
/* | /* | ||||
* Set valid bits inclusive of any overlap. | * Set valid bits inclusive of any overlap. | ||||
*/ | */ | ||||
pagebits = vm_page_bits(base, size); | pagebits = vm_page_bits(base, size); | ||||
if (vm_page_xbusied(m)) | if (vm_page_xbusied(m)) | ||||
m->valid |= pagebits; | m->valid |= pagebits; | ||||
else | else | ||||
vm_page_bits_set(m, &m->valid, pagebits); | vm_page_bits_set(m, &m->valid, pagebits); | ||||
} | |||||
/* | |||||
* Set the page dirty bits and free the invalid swap space if | |||||
* present. Returns the previous dirty bits. | |||||
*/ | |||||
vm_page_bits_t | |||||
vm_page_set_dirty(vm_page_t m) | |||||
{ | |||||
vm_page_bits_t old; | |||||
VM_PAGE_OBJECT_BUSY_ASSERT(m); | |||||
if (vm_page_xbusied(m) && !pmap_page_is_write_mapped(m)) { | |||||
old = m->dirty; | |||||
m->dirty = VM_PAGE_BITS_ALL; | |||||
} else | |||||
old = vm_page_bits_swap(m, &m->dirty, VM_PAGE_BITS_ALL); | |||||
if (old == 0 && (m->a.flags & PGA_SWAP_SPACE) != 0) | |||||
vm_pager_page_unswapped(m); | |||||
return (old); | |||||
} | } | ||||
/* | /* | ||||
* Clear the given bits from the specified page's dirty field. | * Clear the given bits from the specified page's dirty field. | ||||
*/ | */ | ||||
static __inline void | static __inline void | ||||
vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) | vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 440 Lines • Show Last 20 Lines |