Changeset View
Changeset View
Standalone View
Standalone View
sys/vm/vm_page.c
Show First 20 Lines • Show All 2,893 Lines • ▼ Show 20 Lines | vm_domain_clear(struct vm_domain *vmd) | ||||
} | } | ||||
mtx_unlock(&vm_domainset_lock); | mtx_unlock(&vm_domainset_lock); | ||||
} | } | ||||
/* | /* | ||||
* Wait for free pages to exceed the min threshold globally. | * Wait for free pages to exceed the min threshold globally, or for the requested timeout to elapse. | ||||
*/ | */ | ||||
void | void | ||||
vm_wait_min(void) | vm_wait_min(int timo) | ||||
{ | { | ||||
mtx_lock(&vm_domainset_lock); | mtx_lock(&vm_domainset_lock); | ||||
while (vm_page_count_min()) { | while (vm_page_count_min()) { | ||||
vm_min_waiters++; | vm_min_waiters++; | ||||
msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0); | msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", timo); | ||||
} | } | ||||
mtx_unlock(&vm_domainset_lock); | mtx_unlock(&vm_domainset_lock); | ||||
} | } | ||||
/* | /* | ||||
* Wait for free pages to exceed the severe threshold globally. | * Wait for free pages to exceed the severe threshold globally. | ||||
*/ | */ | ||||
void | void | ||||
Show All 12 Lines | |||||
u_int | u_int | ||||
vm_wait_count(void) | vm_wait_count(void) | ||||
{ | { | ||||
return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); | return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters); | ||||
} | } | ||||
static void | static void | ||||
vm_wait_doms(const domainset_t *wdoms) | vm_wait_doms(const domainset_t *wdoms, int timo) | ||||
{ | { | ||||
/* | /* | ||||
* We use racy wakeup synchronization to avoid expensive global | * We use racy wakeup synchronization to avoid expensive global | ||||
* locking for the pageproc when sleeping with a non-specific vm_wait. | * locking for the pageproc when sleeping with a non-specific vm_wait. | ||||
* To handle this, we only sleep for one tick in this instance. It | * To handle this, we only sleep for one tick in this instance. It | ||||
* is expected that most allocations for the pageproc will come from | * is expected that most allocations for the pageproc will come from | ||||
* kmem or vm_page_grab* which will use the more specific and | * kmem or vm_page_grab* which will use the more specific and | ||||
Show All 9 Lines | if (curproc == pageproc) { | ||||
* XXX Ideally we would wait only until the allocation could | * XXX Ideally we would wait only until the allocation could | ||||
* be satisfied. This condition can cause new allocators to | * be satisfied. This condition can cause new allocators to | ||||
* consume all freed pages while old allocators wait. | * consume all freed pages while old allocators wait. | ||||
*/ | */ | ||||
mtx_lock(&vm_domainset_lock); | mtx_lock(&vm_domainset_lock); | ||||
if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) { | if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) { | ||||
vm_min_waiters++; | vm_min_waiters++; | ||||
msleep(&vm_min_domains, &vm_domainset_lock, PVM, | msleep(&vm_min_domains, &vm_domainset_lock, PVM, | ||||
"vmwait", 0); | "vmwait", timo); | ||||
} | } | ||||
mtx_unlock(&vm_domainset_lock); | mtx_unlock(&vm_domainset_lock); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* vm_wait_domain: | * vm_wait_domain: | ||||
* | * | ||||
Show All 17 Lines | if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) { | ||||
&vm_domainset_lock, PDROP | PSWP, "VMWait", 0); | &vm_domainset_lock, PDROP | PSWP, "VMWait", 0); | ||||
} else | } else | ||||
mtx_unlock(&vm_domainset_lock); | mtx_unlock(&vm_domainset_lock); | ||||
} else { | } else { | ||||
if (pageproc == NULL) | if (pageproc == NULL) | ||||
panic("vm_wait in early boot"); | panic("vm_wait in early boot"); | ||||
DOMAINSET_ZERO(&wdom); | DOMAINSET_ZERO(&wdom); | ||||
DOMAINSET_SET(vmd->vmd_domain, &wdom); | DOMAINSET_SET(vmd->vmd_domain, &wdom); | ||||
vm_wait_doms(&wdom); | vm_wait_doms(&wdom, 0); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* vm_wait: | * vm_wait: | ||||
* | * | ||||
* Sleep until free pages are available for allocation in the | * Sleep until free pages are available for allocation in the | ||||
* affinity domains of the obj. If obj is NULL, the domain set | * affinity domains of the obj. If obj is NULL, the domain set | ||||
* for the calling thread is used. | * for the calling thread is used. | ||||
* Called in various places after failed memory allocations. | * Called in various places after failed memory allocations. | ||||
*/ | */ | ||||
void | void | ||||
vm_wait(vm_object_t obj) | vm_wait(vm_object_t obj, int timo) | ||||
{ | { | ||||
struct domainset *d; | struct domainset *d; | ||||
d = NULL; | d = NULL; | ||||
/* | /* | ||||
* Carefully fetch pointers only once: the struct domainset | * Carefully fetch pointers only once: the struct domainset | ||||
* itself is immutable but the pointer might change. | * itself is immutable but the pointer might change. | ||||
*/ | */ | ||||
if (obj != NULL) | if (obj != NULL) | ||||
d = obj->domain.dr_policy; | d = obj->domain.dr_policy; | ||||
if (d == NULL) | if (d == NULL) | ||||
d = curthread->td_domain.dr_policy; | d = curthread->td_domain.dr_policy; | ||||
vm_wait_doms(&d->ds_mask); | vm_wait_doms(&d->ds_mask, timo); | ||||
} | } | ||||
/* | /* | ||||
* vm_domain_alloc_fail: | * vm_domain_alloc_fail: | ||||
* | * | ||||
* Called when a page allocation function fails. Informs the | * Called when a page allocation function fails. Informs the | ||||
* pagedaemon and performs the requested wait. Requires the | * pagedaemon and performs the requested wait. Requires the | ||||
* domain_free and object lock on entry. Returns with the | * domain_free and object lock on entry. Returns with the | ||||
Show All 20 Lines | vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req) | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* vm_waitpfault: | * vm_waitpfault: | ||||
* | * | ||||
* Sleep until free pages are available for allocation. | * Sleep until free pages are available for allocation, or the requested timeout has elapsed. | ||||
markj: "until free pages are available for allocation, or the requested timeout has elapsed." | |||||
* - Called only in vm_fault so that processes page faulting | * - Called only in vm_fault so that processes page faulting | ||||
* can be easily tracked. | * can be easily tracked. | ||||
Not Done Inline ActionsGiven that this function is defined to have only one caller, vm_fault(), couldn't we pass PCATCH to the msleep() call? alc: Given that this function is defined to have only one caller, vm_fault(), couldn't we pass… | |||||
Done Inline ActionsSo what would be the semantic ? We allow the sleep interruption with PCATCH, but the signal cannot be delivered right now so it is queued for the process. I do not see a way to interrupt the page fault handler, we either should fail it or restart. So the signal sent would just cause the earlier restart of the fault handler loop ? IMO timeout is enough for that. kib: So what would be the semantic ? We allow the sleep interruption with PCATCH, but the signal… | |||||
* - Sleeps at a lower priority than vm_wait() so that vm_wait()ing | * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing | ||||
* processes will be able to grab memory first. Do not change | * processes will be able to grab memory first. Do not change | ||||
* this balance without careful testing first. | * this balance without careful testing first. | ||||
*/ | */ | ||||
void | void | ||||
vm_waitpfault(void) | vm_waitpfault(int timo) | ||||
{ | { | ||||
mtx_lock(&vm_domainset_lock); | mtx_lock(&vm_domainset_lock); | ||||
if (vm_page_count_min()) { | if (vm_page_count_min()) { | ||||
vm_min_waiters++; | vm_min_waiters++; | ||||
msleep(&vm_min_domains, &vm_domainset_lock, PUSER, "pfault", 0); | msleep(&vm_min_domains, &vm_domainset_lock, PUSER, "pfault", | ||||
timo); | |||||
} | } | ||||
mtx_unlock(&vm_domainset_lock); | mtx_unlock(&vm_domainset_lock); | ||||
} | } | ||||
struct vm_pagequeue * | struct vm_pagequeue * | ||||
vm_page_pagequeue(vm_page_t m) | vm_page_pagequeue(vm_page_t m) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 1,419 Lines • Show Last 20 Lines |
"until free pages are available for allocation, or the requested timeout has elapsed."