sys/vm/vm_page.c
Show First 20 Lines • Show All 115 Lines • ▼ Show 20 Lines
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>

#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 * Isilon: The per-cpu page cache only works when we do not require the
 * vm_page_queue_free_mtx to protect the object's cache queue.  The cache
 * queue gives us no real advantage and so it is disabled by default.
 */
#ifndef VM_ENABLE_CACHE
#define VM_PERCPU_FREE 1
#endif
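
/*
 * With VM_PERCPU_FREE defined (the default), vm_page_alloc() and
 * vm_page_free_toq() front the physical allocator with small per-CPU
 * free lists; building with VM_ENABLE_CACHE instead restores the legacy
 * per-object cache queue and compiles the per-CPU lists out.
 */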

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

struct vm_domain vm_dom[MAXMEMDOM];

struct mtx_padalign vm_page_queue_free_mtx;

struct mtx_padalign pa_lock[PA_LOCK_COUNT];
Show All 15 Lines
static TAILQ_HEAD(, vm_page) blacklist_head;

static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static uma_zone_t fakepg_zone;

#ifdef VM_ENABLE_CACHE
static void vm_page_cache_turn_free(vm_page_t m);
#endif
static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_enqueue(uint8_t queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
    vm_page_t mpred);
Show All 9 Lines
/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

#ifndef VM_ENABLE_CACHE
struct vm_page_percpu {
    struct mtx vpp_lock;
    struct pglist vpp_pages;
    int vpp_cnt;
} __aligned(CACHE_LINE_SIZE);

struct vm_page_percpu page_percpu[MAXCPU] __aligned(CACHE_LINE_SIZE);

#define VM_PERCPU_MIN    128
#define VM_PERCPU_TARGET (VM_PERCPU_MIN * 2)
#define VM_PERCPU_MAX    (VM_PERCPU_MIN * 3)
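
/*
 * The watermarks give each CPU's list some hysteresis: an allocation
 * that finds fewer than VM_PERCPU_MIN pages refills the list up to
 * VM_PERCPU_TARGET, and a free that pushes it past VM_PERCPU_MAX
 * drains it back down to the target.
 */
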
static void
vm_page_percpu_init(void)
{
    int i;

    for (i = 0; i < MAXCPU; i++) {
        mtx_init(&page_percpu[i].vpp_lock, "per-cpu free mtx", NULL,
            MTX_DEF);
        TAILQ_INIT(&page_percpu[i].vpp_pages);
        page_percpu[i].vpp_cnt = 0;
    }
}
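
/*
 * Pop a page from the current CPU's free list, refilling the list from
 * the physical allocator first when it has dropped below the low
 * watermark.  Returns NULL for reservation-colored objects (when
 * reservations are enabled) and when the list is empty with the system
 * at its minimum free-page count; the caller then falls back to the
 * ordinary allocation path.
 */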
static vm_page_t
vm_page_percpu_alloc(vm_object_t object)
{
    struct vm_page_percpu *ppcpu = &page_percpu[PCPU_GET(cpuid)];
    vm_page_t m;

#if VM_NRESERVLEVEL > 0
    /*
     * Skip the cache of free pages for objects that have reservations
     * so that they can still get superpages.  OBJ_COLORED is never set
     * for objects populated via the filesystem buffer cache.
     */
    if (object != NULL && (object->flags & OBJ_COLORED) != 0)
        return (NULL);
#endif
    mtx_lock(&ppcpu->vpp_lock);
    if (ppcpu->vpp_cnt < VM_PERCPU_MIN) {
        mtx_lock(&vm_page_queue_free_mtx);
        while (!vm_page_count_min() &&
            ppcpu->vpp_cnt < VM_PERCPU_TARGET) {
            m = vm_phys_alloc_pages(object != NULL ?
                VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
            if (m == NULL)
                break;
            vm_phys_freecnt_adj(m, -1);
            ppcpu->vpp_cnt++;
            TAILQ_INSERT_TAIL(&ppcpu->vpp_pages, m, plinks.q);
        }
        mtx_unlock(&vm_page_queue_free_mtx);
    }
    m = NULL;
    if (ppcpu->vpp_cnt > 0) {
        m = TAILQ_FIRST(&ppcpu->vpp_pages);
        TAILQ_REMOVE(&ppcpu->vpp_pages, m, plinks.q);
        ppcpu->vpp_cnt--;
    }
    mtx_unlock(&ppcpu->vpp_lock);
    return (m);
}

static inline void vm_page_free_wakeup(void);
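
/*
 * Push a freed page onto the current CPU's list.  When the list grows
 * past the high watermark, the surplus is returned to the physical
 * allocator (or to its reservation) under the free queue mutex, and
 * vm_page_free_wakeup() is called to unblock threads waiting for free
 * pages.
 */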
static void
vm_page_percpu_free(vm_page_t m)
{
    struct vm_page_percpu *ppcpu = &page_percpu[PCPU_GET(cpuid)];

    mtx_lock(&ppcpu->vpp_lock);
    TAILQ_INSERT_HEAD(&ppcpu->vpp_pages, m, plinks.q);
    ppcpu->vpp_cnt++;
    if (ppcpu->vpp_cnt > VM_PERCPU_MAX) {
        mtx_lock(&vm_page_queue_free_mtx);
        while (ppcpu->vpp_cnt > VM_PERCPU_TARGET) {
            m = TAILQ_FIRST(&ppcpu->vpp_pages);
            TAILQ_REMOVE(&ppcpu->vpp_pages, m, plinks.q);
            ppcpu->vpp_cnt--;
            vm_phys_freecnt_adj(m, 1);
#if VM_NRESERVLEVEL > 0
            if (!vm_reserv_free_page(m))
#else
            if (TRUE)
#endif
                vm_phys_free_pages(m, 0);
        }
        vm_page_free_wakeup();
        mtx_unlock(&vm_page_queue_free_mtx);
    }
    mtx_unlock(&ppcpu->vpp_lock);
}
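
/*
 * Both functions above take a CPU's vpp_lock before
 * vm_page_queue_free_mtx, never the reverse, which fixes the lock
 * ordering between the per-CPU lists and the global free queue.
 */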
#endif /* !VM_ENABLE_CACHE */
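
/*
 * Illustrative sketch, not part of the change: the per-CPU cache is
 * transparent to consumers of the regular page KPI.  The hypothetical
 * helper below allocates a wired scratch page without an object, so it
 * is served from VM_FREEPOOL_DIRECT through the per-CPU list.
 */
#if 0
static vm_page_t
example_grab_scratch_page(void)
{
    vm_page_t m;

    m = vm_page_alloc(NULL, 0,
        VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
    /* Pages from the per-CPU list never carry PG_ZERO; zero manually. */
    if (m != NULL && (m->flags & PG_ZERO) == 0)
        pmap_zero_page(m);
    return (m);
}
#endif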

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
▲ Show 20 Lines • Show All 422 Lines • ▼ Show 20 Lines | #endif
    freeenv(list);

#if VM_NRESERVLEVEL > 0
    /*
     * Initialize the reservation management system.
     */
    vm_reserv_init();
#endif
#ifdef VM_PERCPU_FREE
    vm_page_percpu_init();
#endif
    return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

    vm_page_aflag_set(m, PGA_REFERENCED);
▲ Show 20 Lines • Show All 799 Lines • ▼ Show 20 Lines
 * zero is given for "end", then the range's upper bound is
 * infinity.  If the given object is backed by a vnode and it
 * transitions from having one or more cached pages to none, the
 * vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
#ifdef VM_ENABLE_CACHE
    vm_page_t m;
    boolean_t empty;

    mtx_lock(&vm_page_queue_free_mtx);
    if (__predict_false(vm_radix_is_empty(&object->cache))) {
        mtx_unlock(&vm_page_queue_free_mtx);
        return;
    }
    while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
        if (end != 0 && m->pindex >= end)
            break;
        vm_radix_remove(&object->cache, m->pindex);
        vm_page_cache_turn_free(m);
    }
    empty = vm_radix_is_empty(&object->cache);
    mtx_unlock(&vm_page_queue_free_mtx);
    if (object->type == OBJT_VNODE && empty)
        vdrop(object->handle);
#endif
}
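
/*
 * With the per-CPU lists in use there are never any cached pages, so
 * the body above compiles away and vm_page_cache_free() becomes a
 * no-op stub; callers need not change.
 */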

/*
 * Returns the cached page that is associated with the given
 * object and offset.  If, however, none exists, returns NULL.
 *
 * The free page queue must be locked.
 */
#ifdef VM_ENABLE_CACHE
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{

    mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
    return (vm_radix_lookup(&object->cache, pindex));
}
#endif

/*
 * Remove the given cached page from its containing object's
 * collection of cached pages.
 *
 * The free page queue must be locked.
 */
static void
vm_page_cache_remove(vm_page_t m)
Show All 17 Lines
 * correspond to offset zero in the new object.
 *
 * The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
#ifdef VM_ENABLE_CACHE
    vm_page_t m;

    /*
     * Insertion into an object's collection of cached pages
     * requires the object to be locked.  In contrast, removal does
     * not.
     */
    VM_OBJECT_ASSERT_WLOCKED(new_object);
Show All 13 Lines | while ((m = vm_radix_lookup_ge(&orig_object->cache,
        vm_radix_remove(&orig_object->cache, m->pindex);
        /* Update the page's object and offset. */
        m->object = new_object;
        m->pindex -= offidxstart;
        if (vm_radix_insert(&new_object->cache, m))
            vm_page_cache_turn_free(m);
    }
    mtx_unlock(&vm_page_queue_free_mtx);
#endif
}

/*
 * Returns TRUE if a cached page is associated with the given object and
 * offset, and FALSE otherwise.
 *
 * The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
#ifdef VM_ENABLE_CACHE
    vm_page_t m;
#endif

    /*
     * Insertion into an object's collection of cached pages requires the
     * object to be locked.  Therefore, if the object is locked and the
     * object's collection is empty, there is no need to acquire the free
     * page queues lock in order to prove that the specified page doesn't
     * exist.
     */
    VM_OBJECT_ASSERT_WLOCKED(object);
    if (__predict_true(vm_object_cache_is_empty(object)))
        return (FALSE);
#ifdef VM_ENABLE_CACHE
    mtx_lock(&vm_page_queue_free_mtx);
    m = vm_page_cache_lookup(object, pindex);
    mtx_unlock(&vm_page_queue_free_mtx);
    return (m != NULL);
#else
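    /*
     * Nothing inserts into an object's cache trie in this
     * configuration, so a non-empty collection can only mean state
     * corruption.
     */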
panic("Found unexpected cached page."); | |||||
return (FALSE); | |||||
#endif | |||||
} | } | ||||

/*
 * vm_page_alloc:
 *
 * Allocate and return a page that is associated with the specified
 * object and offset pair.  By default, this page is exclusive busied.
 *
Show All 19 Lines
 * VM_ALLOC_ZERO		prefer a zeroed page
 *
 * This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
    struct vnode *vp = NULL;
#ifdef VM_ENABLE_CACHE
    vm_object_t m_object;
#endif
    vm_page_t m, mpred;
    int flags, req_class;

    mpred = 0;	/* XXX: pacify gcc */
    KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
        (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
        ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
        (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
Show All 11 Lines | if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
        req_class = VM_ALLOC_SYSTEM;
    if (object != NULL) {
        mpred = vm_radix_lookup_le(&object->rtree, pindex);
        KASSERT(mpred == NULL || mpred->pindex != pindex,
            ("vm_page_alloc: pindex already allocated"));
    }
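
    /*
     * Fast path: try the current CPU's free list first; a hit skips the
     * free-queue mutex entirely and jumps to initialization at "gotit".
     */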
#ifdef VM_PERCPU_FREE
    if ((m = vm_page_percpu_alloc(object)) != NULL) {
        flags = 0;
        goto gotit;
    }
#endif

    /*
     * The page allocation request can come from consumers which already
     * hold the free page queue mutex, like vm_page_insert() in
     * vm_page_cache().
     */
    mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
    if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
        (req_class == VM_ALLOC_SYSTEM &&
        vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
        (req_class == VM_ALLOC_INTERRUPT &&
        vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
#ifdef VM_ENABLE_CACHE
        /*
         * Allocate from the free queue if the number of free pages
         * exceeds the minimum for the request class.
         */
        if (object != NULL &&
            (m = vm_page_cache_lookup(object, pindex)) != NULL) {
            if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
                mtx_unlock(&vm_page_queue_free_mtx);
                return (NULL);
            }
            if (vm_phys_unfree_page(m))
                vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
            else if (!vm_reserv_reactivate_page(m))
#else
            else
#endif
                panic("vm_page_alloc: cache page %p is missing"
                    " from the free queue", m);
        } else
#endif
        if ((req & VM_ALLOC_IFCACHED) != 0) {
            mtx_unlock(&vm_page_queue_free_mtx);
            return (NULL);
#if VM_NRESERVLEVEL > 0
        } else if (object == NULL || (object->flags & (OBJ_COLORED |
            OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
            vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
#else
        } else {
Show All 29 Lines | #endif
    KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
    KASSERT(!vm_page_sbusied(m),
        ("vm_page_alloc: page %p is busy", m));
    KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
    KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
        ("vm_page_alloc: page %p has unexpected memattr %d", m,
        pmap_page_get_memattr(m)));
    if ((m->flags & PG_CACHED) != 0) {
#ifdef VM_ENABLE_CACHE
        KASSERT((m->flags & PG_ZERO) == 0,
            ("vm_page_alloc: cached page %p is PG_ZERO", m));
        KASSERT(m->valid != 0,
            ("vm_page_alloc: cached page %p is invalid", m));
        if (m->object == object && m->pindex == pindex)
            vm_cnt.v_reactivated++;
        else
            m->valid = 0;
        m_object = m->object;
        vm_page_cache_remove(m);
        if (m_object->type == OBJT_VNODE &&
            vm_object_cache_is_empty(m_object))
            vp = m_object->handle;
#else
        panic("Found unexpected cached page.");
#endif
    } else {
        KASSERT(m->valid == 0,
            ("vm_page_alloc: free page %p is valid", m));
        vm_phys_freecnt_adj(m, -1);
        if ((m->flags & PG_ZERO) != 0)
            vm_page_zero_count--;
    }
    mtx_unlock(&vm_page_queue_free_mtx);

    /*
     * Initialize the page.  Only the PG_ZERO flag is inherited.
     */
    flags = 0;
    if ((req & VM_ALLOC_ZERO) != 0)
        flags = PG_ZERO;
    flags &= m->flags;
#ifdef VM_PERCPU_FREE
gotit:
#endif
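    /*
     * The per-CPU path arrives at "gotit" with "flags" preset to zero,
     * so a page recycled through the per-CPU lists is never treated as
     * pre-zeroed even if it carried PG_ZERO when it was freed.
     */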
    if ((req & VM_ALLOC_NODUMP) != 0)
        flags |= PG_NODUMP;
    m->flags = flags;
    m->aflags = 0;
    m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
        VPO_UNMANAGED : 0;
    m->busy_lock = VPB_UNBUSIED;
    if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
▲ Show 20 Lines • Show All 325 Lines • ▼ Show 20 Lines | vm_page_alloc_freelist(int flind, int req)
    /*
     * Do not allocate reserved pages unless the req has asked for it.
     */
    mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
    if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
        (req_class == VM_ALLOC_SYSTEM &&
        vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
        (req_class == VM_ALLOC_INTERRUPT &&
        vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
        m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
    else {
        mtx_unlock(&vm_page_queue_free_mtx);
        atomic_add_int(&vm_pageout_deficit,
            max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
        pagedaemon_wakeup();
        return (NULL);
    }
▲ Show 20 Lines • Show All 253 Lines • ▼ Show 20 Lines | vm_page_free_wakeup(void)
     * lots of memory.  This process will swap in processes.
     */
    if (vm_pages_needed && !vm_page_count_min()) {
        vm_pages_needed = 0;
        wakeup(&vm_cnt.v_free_count);
    }
}

#ifdef VM_ENABLE_CACHE
/*
 * Turn a cached page into a free page, by changing its attributes.
 * Keep the statistics up-to-date.
 *
 * The free page queue must be locked.
 */
static void
vm_page_cache_turn_free(vm_page_t m)
{

    mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

    m->object = NULL;
    m->valid = 0;
    KASSERT((m->flags & PG_CACHED) != 0,
        ("vm_page_cache_turn_free: page %p is not cached", m));
    m->flags &= ~PG_CACHED;
    vm_cnt.v_cache_count--;
    vm_phys_freecnt_adj(m, 1);
}
#endif

/*
 * vm_page_free_toq:
 *
 * Returns the given page to the free list,
 * disassociating it from any VM object.
 *
 * The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_free_toq(vm_page_t m)
{
#ifdef VM_PERCPU_FREE
    int can_cache;
#endif

    if ((m->oflags & VPO_UNMANAGED) == 0) {
        vm_page_lock_assert(m, MA_OWNED);
        KASSERT(!pmap_page_is_mapped(m),
            ("vm_page_free_toq: freeing mapped page %p", m));
    } else
        KASSERT(m->queue == PQ_NONE,
            ("vm_page_free_toq: unmanaged page %p is queued", m));
    PCPU_INC(cnt.v_tfree);

    if (vm_page_sbusied(m))
        panic("vm_page_free: freeing busy page %p", m);
#ifdef VM_PERCPU_FREE
    can_cache = 0;
    if (m->object != NULL) {
        VM_OBJECT_ASSERT_LOCKED(m->object);
        can_cache = ((m->object->flags & OBJ_COLORED) == 0);
    }
#endif

    /*
     * Unqueue, then remove page.  Note that we cannot destroy
     * the page here because we do not want to call the pager's
     * callback routine until after we've put the page on the
     * appropriate free queue.
     */
    vm_page_remque(m);
    vm_page_remove(m);
Show All 18 Lines | if (m->hold_count != 0) {
        m->flags |= PG_UNHOLDFREE;
    } else {
        /*
         * Restore the default memory attribute to the page.
         */
        if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
            pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

#ifdef VM_PERCPU_FREE
        if (can_cache) {
            vm_page_percpu_free(m);
            return;
        }
#endif

        /*
         * Insert the page into the physical memory allocator's
         * cache/free page queues.
         */
        mtx_lock(&vm_page_queue_free_mtx);
        vm_phys_freecnt_adj(m, 1);
#if VM_NRESERVLEVEL > 0
        if (!vm_reserv_free_page(m))
▲ Show 20 Lines • Show All 61 Lines • ▼ Show 20 Lines
 * If a page is fictitious, then its wire count must always be one.
 *
 * A managed page must be locked.
 */
void
vm_page_unwire(vm_page_t m, uint8_t queue)
{

    KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
        ("vm_page_unwire: invalid queue %u request for page %p",
        queue, m));
    if ((m->oflags & VPO_UNMANAGED) == 0)
        vm_page_lock_assert(m, MA_OWNED);
    if ((m->flags & PG_FICTITIOUS) != 0) {
        KASSERT(m->wire_count == 1,
            ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
        return;
    }
    if (m->wire_count > 0) {
        m->wire_count--;
        if (m->wire_count == 0) {
            atomic_subtract_int(&vm_cnt.v_wire_count, 1);
            if ((m->oflags & VPO_UNMANAGED) != 0 ||
                m->object == NULL)
                return;
            if (queue == PQ_INACTIVE)
                m->flags &= ~PG_WINATCFLS;
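            /*
             * PQ_NONE asks us to leave the page off the paging
             * queues entirely; the caller will requeue or free it.
             */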
            if (queue != PQ_NONE)
                vm_page_enqueue(queue, m);
        }
    } else
        panic("vm_page_unwire: page %p's wire count is zero", m);
}

/*
 * Move the specified page to the inactive queue.
 *
Show All 29 Lines | if ((queue = m->queue) == PQ_INACTIVE)
        return;
    if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
        if (queue != PQ_NONE)
            vm_page_dequeue(m);
        m->flags &= ~PG_WINATCFLS;
        pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
        vm_pagequeue_lock(pq);
        m->queue = PQ_INACTIVE;
        if (athead)
            TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
        else
            TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
        vm_pagequeue_cnt_inc(pq);
        vm_pagequeue_unlock(pq);
    }
}
▲ Show 20 Lines • Show All 60 Lines • ▼ Show 20 Lines
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * The object and page must be locked.
 */
void
vm_page_cache(vm_page_t m)
{
    vm_object_t object;
#ifdef VM_ENABLE_CACHE
    boolean_t cache_was_empty;
#endif

    vm_page_lock_assert(m, MA_OWNED);
    object = m->object;
    VM_OBJECT_ASSERT_WLOCKED(object);
    if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
        m->hold_count || m->wire_count)
        panic("vm_page_cache: attempting to cache busy page");
    KASSERT(!pmap_page_is_mapped(m),
        ("vm_page_cache: page %p is mapped", m));
    KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
    if (m->valid == 0 || object->type == OBJT_DEFAULT ||
        (object->type == OBJT_SWAP &&
        !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
        /*
         * Hypothesis: A cache-eligible page belonging to a
         * default object or swap object but without a backing
         * store must be zero filled.
         */
        vm_page_free(m);
        return;
    }
#ifdef VM_ENABLE_CACHE
    KASSERT((m->flags & PG_CACHED) == 0,
        ("vm_page_cache: page %p is already cached", m));

    /*
     * Remove the page from the paging queues.
     */
    vm_page_remque(m);
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | #endif
     * the object's only resident page.
     */
    if (object->type == OBJT_VNODE) {
        if (cache_was_empty && object->resident_page_count != 0)
            vhold(object->handle);
        else if (!cache_was_empty && object->resident_page_count == 0)
            vdrop(object->handle);
    }
#else /* !VM_ENABLE_CACHE */
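    /*
     * Without a cache queue, "caching" a clean page simply frees it;
     * the page then takes the vm_page_free_toq() path and may land on
     * a per-CPU free list.
     */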
    vm_page_free(m);
#endif
}

/*
 * vm_page_advise
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is used by madvise().
 *
▲ Show 20 Lines • Show All 632 Lines • Show Last 20 Lines