head/sys/vm/vm_page.h
[... 91 lines not shown ...]
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queue (Q). If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access. An annotation of (C) indicates that the field is immutable.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M). In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock. If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field. In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 *
 *	The page structure contains two counters which prevent page reuse.
 *	Both counters are protected by the page lock (P). The hold
 *	counter counts transient references obtained via a pmap lookup, and
 *	is also used to prevent page reclamation in situations where it is
 *	undesirable to block other accesses to the page. The wire counter
 *	is used to implement mlock(2) and is non-zero for pages containing
 *	kernel memory. Pages that are wired or held will not be reclaimed
 *	or laundered by the page daemon, but are treated differently during
 *	a page queue scan: held pages remain at their position in the queue,
 *	while wired pages are removed from the queue and must later be
 *	re-enqueued appropriately by the unwiring thread. It is legal to
 *	call vm_page_free() on a held page; doing so causes it to be removed
 *	from its object and page queue, and the page is released to the
 *	allocator once the last hold reference is dropped. In contrast,
 *	wired pages may not be freed.
 *
 *	In some pmap implementations, the wire count of a page table page is
 *	used to track the number of populated entries.
 *
 *	The busy lock is an embedded reader-writer lock which protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) and
 *	interlocks with the object lock (O). In particular, a page may be
 *	busied or unbusied only with the object write lock held. To avoid
 *	bloating the page structure, the busy lock lacks some of the
 *	features available to the kernel's general-purpose synchronization
 *	primitives. As a result, busy lock ordering rules are not verified,
 *	lock recursion is not detected, and an attempt to xbusy a busy page
 *	or sbusy an xbusy page results in a panic rather than causing the
 *	thread to block. vm_page_sleep_if_busy() can be used to
 *	sleep until the page's busy state changes, after which the caller
 *	must re-lookup the page and re-evaluate its state.
 */
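The 32-bit fallback described above can be illustrated with a short, hedged sketch. The function name example_clear_dirty_32() is hypothetical, and the sketch assumes a little-endian layout; it shows the technique that vm_page_clear_dirty_mask() encapsulates rather than its exact code:

#include <sys/param.h>		/* NBBY */
#include <machine/atomic.h>	/* atomic_clear_32() */

/*
 * Hypothetical sketch, not the kernel's implementation: clear bits in
 * the sub-word "dirty" field with a 32-bit atomic on the aligned word
 * that contains it.  Assumes a little-endian layout; the real
 * vm_page_clear_dirty_mask() also handles big-endian machines and
 * architectures with native sub-word atomics.
 */
static void
example_clear_dirty_32(vm_page_t m, vm_page_bits_t pagebits)
{
	volatile uint32_t *addr;
	uint32_t val;

	/* Round down to the aligned 32-bit word containing m->dirty. */
	addr = (volatile uint32_t *)((uintptr_t)&m->dirty & ~(uintptr_t)3);
	/* Shift the mask to the field's byte offset within that word. */
	val = (uint32_t)pagebits << (((uintptr_t)&m->dirty & 3) * NBBY);
	atomic_clear_32(addr, val);
}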
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
[... 25 lines not shown ...]

struct vm_page {
	/* ... */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	uint8_t order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys freepool index (Q) */
	u_char act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};
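As the NOTE above the valid and dirty fields says, each bit covers one DEV_BSIZE chunk of the page (eight bits for a 4096-byte page with 512-byte blocks). A hedged sketch of the usual range-to-mask computation follows; example_page_bits() is a hypothetical name, not a declaration from this header:

#include <sys/param.h>		/* DEV_BSHIFT */

/*
 * Hypothetical sketch: map the byte range [base, base + size) within a
 * page to its DEV_BSIZE-granular bit mask, in the form used by the
 * "valid" and "dirty" fields.
 */
static inline vm_page_bits_t
example_page_bits(int base, int size)
{
	int first_bit, last_bit;

	if (size == 0)
		return (0);
	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;
	/* Set bits first_bit..last_bit, inclusive. */
	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}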
[... 318 lines not shown ...]
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
bool vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
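Per the header comment, a caller that sleeps in vm_page_sleep_if_busy() must re-lookup the page afterwards, since the object lock is dropped while sleeping and the page's identity may change. A hedged sketch of that caller pattern, using the hypothetical wrapper example_lookup_unbusied():

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Hypothetical sketch: look up a page and wait until it is not busied,
 * redoing the lookup after every sleep.  vm_page_sleep_if_busy()
 * returns nonzero if it slept, in which case the object lock was
 * dropped and reacquired around the sleep.
 */
static vm_page_t
example_lookup_unbusied(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	while ((m = vm_page_lookup(object, pindex)) != NULL &&
	    vm_page_sleep_if_busy(m, "pgwait"))
		continue;	/* Slept; the page may have been freed. */
	return (m);
}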
[... 208 lines not shown ...]

static inline bool
vm_page_inactive(vm_page_t m)
{
	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_held:
 *
 *	Return true if a reference prevents the page from being reclaimable.
 */
static inline bool
vm_page_held(vm_page_t m)
{
	return (m->hold_count > 0 || m->wire_count > 0);
}
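vm_page_held() folds the hold and wire counters into the single question a reclamation scan needs to ask. A hedged sketch of such a check follows; example_try_reclaim() is hypothetical and elides the page daemon's actual bookkeeping:

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical sketch: skip pages that have reuse-preventing
 * references.  Held or wired pages must not be reclaimed, per the
 * rules in the header comment above.
 */
static bool
example_try_reclaim(vm_page_t m)
{
	vm_page_lock(m);
	if (vm_page_held(m)) {
		/* A hold or wiring pins the page; leave it alone. */
		vm_page_unlock(m);
		return (false);
	}
	/* ... reclamation logic would go here ... */
	vm_page_unlock(m);
	return (true);
}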
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */