Changeset View
Changeset View
Standalone View
Standalone View
head/sys/vm/swap_pager.c
Show First 20 Lines • Show All 1,644 Lines • ▼ Show 20 Lines | |||||
/*
 * swap_pager_nswapdev:
 *
 *	Return the number of swap devices currently configured.
 */
int
swap_pager_nswapdev(void)
{

	return (nswapdev);
}
/*
 * Mark the page dirty and release its exclusive busy lock without moving
 * it between page queues.  Used by swp_pager_force_pagein() for a page
 * that was found already valid in memory: the page keeps whatever queue
 * or wiring state it already has.
 */
static void
swp_pager_force_dirty(vm_page_t m)
{

	vm_page_dirty(m);
#ifdef INVARIANTS
	/*
	 * Sanity check: once its swap backing is released the page must
	 * still be reachable — either wired or on some page queue — or it
	 * could be lost to the pagedaemon.
	 */
	vm_page_lock(m);
	if (!vm_page_wired(m) && m->queue == PQ_NONE)
		panic("page %p is neither wired nor queued", m);
	vm_page_unlock(m);
#endif
	vm_page_xunbusy(m);
}
/*
 * Mark the page dirty, place it in the laundry queue, and release its
 * exclusive busy lock.  Used by swp_pager_force_pagein() for a page just
 * read back from swap: it is dirty because it no longer has backing
 * store, and it goes to the laundry because it has not been accessed
 * recently (otherwise it would already have been resident).
 */
static void
swp_pager_force_launder(vm_page_t m)
{

	vm_page_dirty(m);
	vm_page_lock(m);
	vm_page_launder(m);
	vm_page_unlock(m);
	vm_page_xunbusy(m);
}
/*
 * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
 *
 *	This routine dissociates the page at the given index within an object
 *	from its backing store, paging it in if it does not reside in memory.
 *	If the page is paged in, it is marked dirty and placed in the laundry
 *	queue.  The page is marked dirty because it no longer has backing
 *	store.  It is placed in the laundry queue because it has not been
 *	accessed recently.  Otherwise, it would already reside in memory.
 *
 *	We also attempt to swap in all other pages in the swap block.
 *	However, we only guarantee that the one at the specified index is
 *	paged in.
 *
 *	XXX - The code to page the whole block in doesn't work, so we
 *	      revert to the one-by-one behavior for now.  Sigh.
 */
static void
swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/* Hold a paging-in-progress reference across the possible I/O. */
	vm_object_pip_add(object, 1);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
	if (m->valid == VM_PAGE_BITS_ALL) {
		/* Already resident: just dirty it and detach it from swap. */
		vm_object_pip_wakeup(object);
		swp_pager_force_dirty(m);
		vm_pager_page_unswapped(m);
		return;
	}

	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
	vm_object_pip_wakeup(object);
	/* Freshly paged in: dirty it, queue it for laundering, detach. */
	swp_pager_force_launder(m);
	vm_pager_page_unswapped(m);
}
/*
 * swap_pager_swapoff_object:
 *
 *	Page in all of the pages that have been paged out for an object
 *	from a given swap device.
 */
static void
swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
{
	struct swblk *sb;
	vm_pindex_t pi;
	int i;

	/*
	 * Walk the object's swap-block pctrie.  Each swblk covers up to
	 * SWAP_META_PAGES consecutive page indices starting at sb->p, so
	 * the lookup key advances by SWAP_META_PAGES per iteration.
	 */
	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
		pi = sb->p + SWAP_META_PAGES;
		for (i = 0; i < SWAP_META_PAGES; i++) {
			if (sb->d[i] == SWAPBLK_NONE)
				continue;
			/* Only page in blocks that live on this device. */
			if (swp_pager_isondev(sb->d[i], sp))
				swp_pager_force_pagein(object, sb->p + i);
		}
	}
}
/* | |||||
* swap_pager_swapoff: | * swap_pager_swapoff: | ||||
* | * | ||||
* Page in all of the pages that have been paged out to the | * Page in all of the pages that have been paged out to the | ||||
* given device. The corresponding blocks in the bitmap must be | * given device. The corresponding blocks in the bitmap must be | ||||
* marked as allocated and the device must be flagged SW_CLOSING. | * marked as allocated and the device must be flagged SW_CLOSING. | ||||
* There may be no processes swapped out to the device. | * There may be no processes swapped out to the device. | ||||
* | * | ||||
* This routine may block. | * This routine may block. | ||||
*/ | */ | ||||
static void | static void | ||||
swap_pager_swapoff(struct swdevt *sp) | swap_pager_swapoff(struct swdevt *sp) | ||||
{ | { | ||||
struct swblk *sb; | |||||
vm_object_t object; | vm_object_t object; | ||||
vm_pindex_t pi; | int retries; | ||||
int i, retries; | |||||
sx_assert(&swdev_syscall_lock, SA_XLOCKED); | sx_assert(&swdev_syscall_lock, SA_XLOCKED); | ||||
retries = 0; | retries = 0; | ||||
full_rescan: | full_rescan: | ||||
mtx_lock(&vm_object_list_mtx); | mtx_lock(&vm_object_list_mtx); | ||||
TAILQ_FOREACH(object, &vm_object_list, object_list) { | TAILQ_FOREACH(object, &vm_object_list, object_list) { | ||||
if (object->type != OBJT_SWAP) | if (object->type != OBJT_SWAP) | ||||
Show All 13 Lines | TAILQ_FOREACH(object, &vm_object_list, object_list) { | ||||
* initialization. We must not access pctrie below | * initialization. We must not access pctrie below | ||||
* unless we checked that our object is swap and not | * unless we checked that our object is swap and not | ||||
* dead. | * dead. | ||||
*/ | */ | ||||
atomic_thread_fence_acq(); | atomic_thread_fence_acq(); | ||||
if (object->type != OBJT_SWAP) | if (object->type != OBJT_SWAP) | ||||
goto next_obj; | goto next_obj; | ||||
for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | swap_pager_swapoff_object(sp, object); | ||||
&object->un_pager.swp.swp_blks, pi)) != NULL; ) { | |||||
pi = sb->p + SWAP_META_PAGES; | |||||
for (i = 0; i < SWAP_META_PAGES; i++) { | |||||
if (sb->d[i] == SWAPBLK_NONE) | |||||
continue; | |||||
if (swp_pager_isondev(sb->d[i], sp)) | |||||
swp_pager_force_pagein(object, | |||||
sb->p + i); | |||||
} | |||||
} | |||||
next_obj: | next_obj: | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
mtx_lock(&vm_object_list_mtx); | mtx_lock(&vm_object_list_mtx); | ||||
} | } | ||||
mtx_unlock(&vm_object_list_mtx); | mtx_unlock(&vm_object_list_mtx); | ||||
if (sp->sw_used) { | if (sp->sw_used) { | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 1,165 Lines • Show Last 20 Lines |