head/sys/vm/swap_pager.c

[... first 1,656 lines unchanged ...]

@@ swp_pager_force_dirty(vm_page_t m) @@
 	vm_page_dirty(m);
 #ifdef INVARIANTS
 	vm_page_lock(m);
 	if (!vm_page_wired(m) && m->queue == PQ_NONE)
 		panic("page %p is neither wired nor queued", m);
 	vm_page_unlock(m);
 #endif
 	vm_page_xunbusy(m);
+	swap_pager_unswapped(m);
 }
 
 static void
 swp_pager_force_launder(vm_page_t m)
 {
 	vm_page_dirty(m);
 	vm_page_lock(m);
 	vm_page_launder(m);
 	vm_page_unlock(m);
 	vm_page_xunbusy(m);
+	swap_pager_unswapped(m);
 }
 
 /*
- * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
+ * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
  *
- *	This routine dissociates the page at the given index within an object
- *	from its backing store, paging it in if it does not reside in memory.
- *	If the page is paged in, it is marked dirty and placed in the laundry
- *	queue.  The page is marked dirty because it no longer has backing
- *	store.  It is placed in the laundry queue because it has not been
- *	accessed recently.  Otherwise, it would already reside in memory.
- *
- *	We also attempt to swap in all other pages in the swap block.
- *	However, we only guarantee that the one at the specified index is
- *	paged in.
- *
- *	XXX - The code to page the whole block in doesn't work, so we
- *	      revert to the one-by-one behavior for now.  Sigh.
+ *	This routine dissociates pages starting at the given index within an
+ *	object from their backing store, paging them in if they do not reside
+ *	in memory.  Pages that are paged in are marked dirty and placed in the
+ *	laundry queue.  Pages are marked dirty because they no longer have
+ *	backing store.  They are placed in the laundry queue because they have
+ *	not been accessed recently.  Otherwise, they would already reside in
+ *	memory.
  */
 static void
-swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
+swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
 {
-	vm_page_t m;
+	vm_page_t ma[npages];
+	int i, j;
 
-	vm_object_pip_add(object, 1);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
-	if (m->valid == VM_PAGE_BITS_ALL) {
-		vm_object_pip_wakeup(object);
-		swp_pager_force_dirty(m);
-		vm_pager_page_unswapped(m);
-		return;
-	}
-
-	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
-		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
-	vm_object_pip_wakeup(object);
-	swp_pager_force_launder(m);
-	vm_pager_page_unswapped(m);
+	KASSERT(npages > 0, ("%s: No pages", __func__));
+	KASSERT(npages <= MAXPHYS / PAGE_SIZE,
+	    ("%s: Too many pages: %d", __func__, npages));
+	vm_object_pip_add(object, npages);
+	vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
+	for (i = j = 0;; i++) {
+		/* Count nonresident pages, to page-in all at once. */
+		if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
+			continue;
+		if (j < i) {
+			/* Page-in nonresident pages. Mark for laundering. */
+			if (swap_pager_getpages(object, &ma[j], i - j, NULL,
+			    NULL) != VM_PAGER_OK)
+				panic("%s: read from swap failed", __func__);
+			do {
+				swp_pager_force_launder(ma[j]);
+			} while (++j < i);
+		}
+		if (i == npages)
+			break;
+		/* Mark dirty a resident page. */
+		swp_pager_force_dirty(ma[j++]);
+	}
+	vm_object_pip_wakeupn(object, npages);
 }
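
The new loop is terse: i runs ahead to delimit each maximal run of nonresident pages, j trails behind consuming the work, and the i == npages test doubles as the loop exit once the final run has been flushed. The userspace sketch below is an illustration only, not FreeBSD code: scan(), the boolean resident array, and the printfs are hypothetical stand-ins for the page array, the validity test, and the calls into the VM layer. It reproduces the same control flow:

#include <stdbool.h>
#include <stdio.h>

static void
scan(const bool *resident, int npages)
{
	int i, j;

	for (i = j = 0;; i++) {
		/* Let i run ahead across a run of nonresident pages. */
		if (i < npages && !resident[i])
			continue;
		if (j < i) {
			/* Pages [j, i) are nonresident: one batched read. */
			printf("swap in pages [%d, %d) in a single I/O\n",
			    j, i);
			do {
				printf("  launder page %d\n", j);
			} while (++j < i);
		}
		if (i == npages)
			break;
		/* Page j (== i) is resident: just mark it dirty. */
		printf("dirty resident page %d\n", j++);
	}
}

int
main(void)
{
	/* N N R N R R: two nonresident runs, three resident pages. */
	const bool resident[] = { false, false, true, false, true, true };

	scan(resident, (int)(sizeof(resident) / sizeof(resident[0])));
	return (0);
}

Each run of nonresident pages becomes a single batched read followed by laundering, while resident pages are dirtied one at a time, all in index order, which is the behavior the clustered swap_pager_getpages() call depends on.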
 
 /*
  *	swap_pager_swapoff_object:
  *
  *	Page in all of the pages that have been paged out for an object
- *	from a given swap device.
+ *	to a swap device.
  */
 static void
 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 {
 	struct swblk *sb;
-	vm_pindex_t pi;
+	vm_pindex_t pi, s_pindex;
+	daddr_t blk, n_blks, s_blk;
 	int i;
 
+	n_blks = 0;
 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
-		pi = sb->p + SWAP_META_PAGES;
 		for (i = 0; i < SWAP_META_PAGES; i++) {
-			if (sb->d[i] == SWAPBLK_NONE)
-				continue;
-			if (swp_pager_isondev(sb->d[i], sp))
-				swp_pager_force_pagein(object, sb->p + i);
+			blk = sb->d[i];
+			if (!swp_pager_isondev(blk, sp))
+				blk = SWAPBLK_NONE;
+
+			/*
+			 * If there are no blocks/pages accumulated, start a
+			 * new accumulation here.
+			 */
+			if (n_blks == 0) {
+				if (blk != SWAPBLK_NONE) {
+					s_blk = blk;
+					s_pindex = sb->p + i;
+					n_blks = 1;
+				}
+				continue;
+			}
+
+			/*
+			 * If the accumulation can be extended without breaking
+			 * the sequence of consecutive blocks and pages that
+			 * swp_pager_force_pagein() depends on, do so.
+			 */
+			if (n_blks < MAXPHYS / PAGE_SIZE &&
+			    s_blk + n_blks == blk &&
+			    s_pindex + n_blks == sb->p + i) {
+				++n_blks;
+				continue;
+			}
+
+			/*
+			 * The sequence of consecutive blocks and pages cannot
+			 * be extended, so page them all in here.  Then,
+			 * because doing so involves releasing and reacquiring
+			 * a lock that protects the swap block pctrie, do not
+			 * rely on the current swap block.  Break this loop and
+			 * re-fetch the same pindex from the pctrie again.
+			 */
+			swp_pager_force_pagein(object, s_pindex, n_blks);
+			n_blks = 0;
+			break;
 		}
+		if (i == SWAP_META_PAGES)
+			pi = sb->p + SWAP_META_PAGES;
 	}
+	if (n_blks > 0)
+		swp_pager_force_pagein(object, s_pindex, n_blks);
 }
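
The bookkeeping in the new inner loop is the heart of the change: a run grows only while the swap blocks stay consecutive on the device (s_blk + n_blks == blk) and the pages stay consecutive in the object (s_pindex + n_blks == sb->p + i), capped at MAXPHYS / PAGE_SIZE so the whole run can be read in one I/O. Below is a minimal userspace sketch of the same accumulation, an illustration only: struct ent, BLK_NONE, MAX_RUN, and flush() are hypothetical stand-ins for the pctrie entries, SWAPBLK_NONE, MAXPHYS / PAGE_SIZE, and swp_pager_force_pagein(), and the kernel's break-and-relookup is modeled by retrying the current entry after a flush:

#include <stdio.h>

#define	BLK_NONE	(-1L)	/* stand-in for SWAPBLK_NONE */
#define	MAX_RUN		8	/* stand-in for MAXPHYS / PAGE_SIZE */

/* Hypothetical flattened swap metadata: one (pindex, blk) pair per page. */
struct ent {
	long	pindex;
	long	blk;
};

static void
flush(long s_pindex, long n_blks)
{
	/* Stand-in for swp_pager_force_pagein(object, s_pindex, n_blks). */
	printf("page in %ld pages starting at pindex %ld\n", n_blks, s_pindex);
}

int
main(void)
{
	const struct ent ents[] = {
		{ 0, 100 }, { 1, 101 }, { 2, 102 },	/* run of three */
		{ 3, BLK_NONE },			/* hole */
		{ 5, 200 }, { 6, 201 },			/* run of two */
	};
	long blk, n_blks, s_blk, s_pindex;
	size_t i;

	n_blks = 0;
	s_blk = s_pindex = 0;
	for (i = 0; i < sizeof(ents) / sizeof(ents[0]); i++) {
		blk = ents[i].blk;

		/* No run in progress: start one at the first usable block. */
		if (n_blks == 0) {
			if (blk != BLK_NONE) {
				s_blk = blk;
				s_pindex = ents[i].pindex;
				n_blks = 1;
			}
			continue;
		}

		/* Extend while blocks and pindexes both stay consecutive. */
		if (n_blks < MAX_RUN && s_blk + n_blks == blk &&
		    s_pindex + n_blks == ents[i].pindex) {
			n_blks++;
			continue;
		}

		/* Run broken: flush it, then retry this entry as a new run. */
		flush(s_pindex, n_blks);
		n_blks = 0;
		i--;
	}
	if (n_blks > 0)
		flush(s_pindex, n_blks);
	return (0);
}

The retry matters: the entry that broke the run was never consumed. The kernel gets the same effect by breaking out of the inner loop without advancing pi, so the pctrie relookup revisits the same swblk with n_blks reset to zero.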
 
 /*
  *	swap_pager_swapoff:
  *
  *	Page in all of the pages that have been paged out to the
  *	given device.  The corresponding blocks in the bitmap must be
  *	marked as allocated and the device must be flagged SW_CLOSING.

[... last 1,212 lines unchanged ...]