Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_page.c
Show First 20 Lines • Show All 1,627 Lines • ▼ Show 20 Lines | vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) | ||||
} | } | ||||
/* | /* | ||||
* The page allocation request can come from consumers which already | * The page allocation request can come from consumers which already | ||||
* hold the free page queue mutex, like vm_page_insert() in | * hold the free page queue mutex, like vm_page_insert() in | ||||
* vm_page_cache(). | * vm_page_cache(). | ||||
*/ | */ | ||||
mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE); | mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE); | ||||
m = NULL; | |||||
if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved || | if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved || | ||||
(req_class == VM_ALLOC_SYSTEM && | (req_class == VM_ALLOC_SYSTEM && | ||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) || | vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) || | ||||
(req_class == VM_ALLOC_INTERRUPT && | (req_class == VM_ALLOC_INTERRUPT && | ||||
vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) { | vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) { | ||||
/* | /* | ||||
* Allocate from the free queue if the number of free pages | * Allocate from the free queue if the number of free pages | ||||
* exceeds the minimum for the request class. | * exceeds the minimum for the request class. | ||||
Show All 28 Lines | |||||
#if VM_NRESERVLEVEL > 0 | #if VM_NRESERVLEVEL > 0 | ||||
if (m == NULL && vm_reserv_reclaim_inactive()) { | if (m == NULL && vm_reserv_reclaim_inactive()) { | ||||
m = vm_phys_alloc_pages(object != NULL ? | m = vm_phys_alloc_pages(object != NULL ? | ||||
VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, | VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, | ||||
0); | 0); | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
} else { | } | ||||
/* | |||||
* Can't allocate or attempted to and couldn't allocate a page | |||||
* given the current VM policy. Give up. | |||||
* | |||||
* Note - yes, this is one of the current shortcomings of the | |||||
* VM domain design - there's a global set of vm_cnt counters, | |||||
* and it's quite possible things will get unhappy with this. | |||||
* However, without this check the kernel would panic below - the
* code didn't check m == NULL here and would continue.
*/ | |||||
if (m == NULL) { | |||||
/* | /* | ||||
* Not allocatable, give up. | * Not allocatable, give up. | ||||
*/ | */ | ||||
mtx_unlock(&vm_page_queue_free_mtx); | mtx_unlock(&vm_page_queue_free_mtx); | ||||
atomic_add_int(&vm_pageout_deficit, | atomic_add_int(&vm_pageout_deficit, | ||||
max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); | max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1)); | ||||
pagedaemon_wakeup(); | pagedaemon_wakeup(); | ||||
return (NULL); | return (NULL); | ||||
▲ Show 20 Lines • Show All 1,685 Lines • Show Last 20 Lines |