Changeset View
Standalone View
sys/vm/swap_pager.c
Show First 20 Lines • Show All 412 Lines • ▼ Show 20 Lines | |||||
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
/* Forward declarations for the pager method tables defined below. */
static vm_object_t
	swap_pager_alloc(void *handle, vm_ooffset_t size,
	vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static vm_object_t
	swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size,
	vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t,
    int *);
static boolean_t
	swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);
static void	swap_pager_update_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_pager_release_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp,
    bool *vp_heldp);
static void	swap_pager_freespace(vm_object_t object, vm_pindex_t start,
    vm_size_t size);
/*
 * Method table for plain OBJT_SWAP objects.  Declared const so the
 * structure is placed in .rodata (per review discussion; __read_mostly
 * was also suggested as an alternative annotation).
 */
const struct pagerops swappagerops = {
	.pgo_init = swap_pager_init,	/* early system initialization of pager */
	.pgo_alloc = swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc = swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
	.pgo_getpages = swap_pager_getpages, /* pagein */
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
	.pgo_putpages = swap_pager_putpages, /* pageout */
	.pgo_haspage = swap_pager_haspage, /* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
	.pgo_update_writecount = swap_pager_update_writecount,
	.pgo_release_writecount = swap_pager_release_writecount,
	.pgo_freespace = swap_pager_freespace,
};
/*
 * Method table for OBJT_SWAP_TMPFS objects, which back tmpfs vnodes.
 * It shares most methods with the plain swap pager, but additionally
 * provides vnode lookup (pgo_getvp) and writeable-dirty tracking so
 * that mmaped tmpfs files behave like vnode-backed mappings.
 */
const struct pagerops swaptmpfspagerops = {
	.pgo_alloc = swap_tmpfs_pager_alloc,
	.pgo_dealloc = swap_pager_dealloc,
	.pgo_getpages = swap_pager_getpages,
	.pgo_getpages_async = swap_pager_getpages_async,
	.pgo_putpages = swap_pager_putpages,
	.pgo_haspage = swap_pager_haspage,
	.pgo_pageunswapped = swap_pager_unswapped,
	.pgo_update_writecount = swap_pager_update_writecount,
	.pgo_release_writecount = swap_pager_release_writecount,
	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
	.pgo_mightbedirty = vm_object_mightbedirty_,
	.pgo_getvp = swap_tmpfs_pager_getvp,
	.pgo_freespace = swap_pager_freespace,
};
/* | /* | ||||
* swap_*() routines are externally accessible. swp_*() routines are | * swap_*() routines are externally accessible. swp_*() routines are | ||||
* internal. | * internal. | ||||
*/ | */ | ||||
static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ | static int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ | ||||
static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ | static int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ | ||||
SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0, | SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0, | ||||
▲ Show 20 Lines • Show All 192 Lines • ▼ Show 20 Lines | swap_pager_swap_init(void) | ||||
swap_maxpages = n * SWAP_META_PAGES; | swap_maxpages = n * SWAP_META_PAGES; | ||||
swzone = n * sizeof(struct swblk); | swzone = n * sizeof(struct swblk); | ||||
if (!uma_zone_reserve_kva(swpctrie_zone, n)) | if (!uma_zone_reserve_kva(swpctrie_zone, n)) | ||||
printf("Cannot reserve swap pctrie zone, " | printf("Cannot reserve swap pctrie zone, " | ||||
"reduce kern.maxswzone.\n"); | "reduce kern.maxswzone.\n"); | ||||
} | } | ||||
static vm_object_t | static vm_object_t | ||||
swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size, | swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred, | ||||
vm_ooffset_t offset) | vm_ooffset_t size, vm_ooffset_t offset) | ||||
{ | { | ||||
vm_object_t object; | vm_object_t object; | ||||
if (cred != NULL) { | if (cred != NULL) { | ||||
if (!swap_reserve_by_cred(size, cred)) | if (!swap_reserve_by_cred(size, cred)) | ||||
return (NULL); | return (NULL); | ||||
crhold(cred); | crhold(cred); | ||||
} | } | ||||
/* | /* | ||||
* The un_pager.swp.swp_blks trie is initialized by | * The un_pager.swp.swp_blks trie is initialized by | ||||
* vm_object_allocate() to ensure the correct order of | * vm_object_allocate() to ensure the correct order of | ||||
* visibility to other threads. | * visibility to other threads. | ||||
*/ | */ | ||||
object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset + | object = vm_object_allocate(otype, OFF_TO_IDX(offset + | ||||
PAGE_MASK + size)); | PAGE_MASK + size)); | ||||
object->un_pager.swp.writemappings = 0; | object->un_pager.swp.writemappings = 0; | ||||
object->handle = handle; | object->handle = handle; | ||||
if (cred != NULL) { | if (cred != NULL) { | ||||
object->cred = cred; | object->cred = cred; | ||||
object->charge = size; | object->charge = size; | ||||
} | } | ||||
Show All 22 Lines | if (handle != NULL) { | ||||
* Reference existing named region or allocate new one. There | * Reference existing named region or allocate new one. There | ||||
* should not be a race here against swp_pager_meta_build() | * should not be a race here against swp_pager_meta_build() | ||||
* as called from vm_page_remove() in regards to the lookup | * as called from vm_page_remove() in regards to the lookup | ||||
* of the handle. | * of the handle. | ||||
*/ | */ | ||||
sx_xlock(&sw_alloc_sx); | sx_xlock(&sw_alloc_sx); | ||||
object = vm_pager_object_lookup(NOBJLIST(handle), handle); | object = vm_pager_object_lookup(NOBJLIST(handle), handle); | ||||
if (object == NULL) { | if (object == NULL) { | ||||
object = swap_pager_alloc_init(handle, cred, size, | object = swap_pager_alloc_init(OBJT_SWAP, handle, cred, | ||||
offset); | size, offset); | ||||
if (object != NULL) { | if (object != NULL) { | ||||
TAILQ_INSERT_TAIL(NOBJLIST(object->handle), | TAILQ_INSERT_TAIL(NOBJLIST(object->handle), | ||||
object, pager_object_list); | object, pager_object_list); | ||||
} | } | ||||
} | } | ||||
sx_xunlock(&sw_alloc_sx); | sx_xunlock(&sw_alloc_sx); | ||||
} else { | } else { | ||||
object = swap_pager_alloc_init(handle, cred, size, offset); | object = swap_pager_alloc_init(OBJT_SWAP, handle, cred, | ||||
size, offset); | |||||
} | } | ||||
return (object); | return (object); | ||||
} | } | ||||
static vm_object_t | |||||
swap_tmpfs_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, | |||||
vm_ooffset_t offset, struct ucred *cred) | |||||
{ | |||||
vm_object_t object; | |||||
MPASS(handle == NULL); | |||||
object = swap_pager_alloc_init(OBJT_SWAP_TMPFS, handle, cred, | |||||
size, offset); | |||||
return (object); | |||||
} | |||||
/* | /* | ||||
* SWAP_PAGER_DEALLOC() - remove swap metadata from object | * SWAP_PAGER_DEALLOC() - remove swap metadata from object | ||||
* | * | ||||
* The swap backing for the object is destroyed. The code is | * The swap backing for the object is destroyed. The code is | ||||
* designed such that we can reinstantiate it later, but this | * designed such that we can reinstantiate it later, but this | ||||
* routine is typically called only when the entire object is | * routine is typically called only when the entire object is | ||||
* about to be destroyed. | * about to be destroyed. | ||||
* | * | ||||
Show All 24 Lines | swap_pager_dealloc(vm_object_t object) | ||||
/* | /* | ||||
* Free all remaining metadata. We only bother to free it from | * Free all remaining metadata. We only bother to free it from | ||||
* the swap meta data. We do not attempt to free swapblk's still | * the swap meta data. We do not attempt to free swapblk's still | ||||
* associated with vm_page_t's for this object. We do not care | * associated with vm_page_t's for this object. We do not care | ||||
* if paging is still in progress on some objects. | * if paging is still in progress on some objects. | ||||
*/ | */ | ||||
swp_pager_meta_free_all(object); | swp_pager_meta_free_all(object); | ||||
object->handle = NULL; | object->handle = NULL; | ||||
object->type = OBJT_DEAD; | object->type = OBJT_DEAD; | ||||
vm_object_clear_flag(object, OBJ_SWAP); | |||||
Inline comment (markj): Should we clear OBJ_SWAP as well? Otherwise the checks of object->type that were converted to checks of object->flags need to be audited to make sure they handle dead objects correctly.
Reply (kib): Do you think that such places could exist? swap_pager_dealloc() is called when object refcount == 0, which for non-vnode objects must imply that no one else except our caller has a pointer to the object. Hmm, I think we can only reach a destroyed swap object through the vm_object_list iteration, and then the only such function would be swap_pager_swapoff(). It rechecks OBJ_DEAD after obtaining the object lock. Ok, I will add the clearing of the flag.
Reply (markj): It is not obvious to me that the object checks in vm_page_reclaim_run() are sufficient, for example. vm_object_terminate() does not necessarily remove all pages (see the OBJ_PG_DTOR flag). This is a contrived example though; OBJ_PG_DTOR has only a single user today. Really my point is that some state is now spread around multiple fields, so we should try to minimize the set of valid combinations. To be honest, I somewhat dislike OBJ_SWAP. Instead of having a new object type, did you consider using a tuple <obj->type, obj->flags> to select pager methods? Or, is it really painful to check obj->type == OBJT_SWAP || obj->type == OBJT_SWAP_TMPFS inline?
Reply (kib): The plan is to (try to) make the OBJT_SWAP_TMPFS pager kldloadable from tmpfs.ko. Hardcoding the type in sys/vm would be undesirable.
} | } | ||||
/************************************************************************ | /************************************************************************ | ||||
* SWAP PAGER BITMAP ROUTINES * | * SWAP PAGER BITMAP ROUTINES * | ||||
************************************************************************/ | ************************************************************************/ | ||||
/* | /* | ||||
* SWP_PAGER_GETSWAPSPACE() - allocate raw swap space | * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space | ||||
▲ Show 20 Lines • Show All 154 Lines • ▼ Show 20 Lines | sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS) | ||||
sbuf_delete(&sbuf); | sbuf_delete(&sbuf); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
* SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page | * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page | ||||
* range within an object. | * range within an object. | ||||
* | * | ||||
* This is a globally accessible routine. | |||||
* | |||||
* This routine removes swapblk assignments from swap metadata. | * This routine removes swapblk assignments from swap metadata. | ||||
* | * | ||||
* The external callers of this routine typically have already destroyed | * The external callers of this routine typically have already destroyed | ||||
* or renamed vm_page_t's associated with this range in the object so | * or renamed vm_page_t's associated with this range in the object so | ||||
* we should be ok. | * we should be ok. | ||||
* | * | ||||
* The object must be locked. | * The object must be locked. | ||||
*/ | */ | ||||
static void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	/* Drop swap block assignments for the range [start, start + size). */
	swp_pager_meta_free(object, start, size);
}
/* | /* | ||||
* SWAP_PAGER_RESERVE() - reserve swap blocks in object | * SWAP_PAGER_RESERVE() - reserve swap blocks in object | ||||
Show All 33 Lines | |||||
} | } | ||||
static bool | static bool | ||||
swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject, | swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject, | ||||
vm_pindex_t pindex, daddr_t addr) | vm_pindex_t pindex, daddr_t addr) | ||||
{ | { | ||||
daddr_t dstaddr; | daddr_t dstaddr; | ||||
KASSERT(srcobject->type == OBJT_SWAP, | KASSERT((srcobject->flags & OBJ_SWAP) != 0, | ||||
("%s: Srcobject not swappable", __func__)); | ("%s: Srcobject not swappable", __func__)); | ||||
if (dstobject->type == OBJT_SWAP && | if ((dstobject->flags & OBJ_SWAP) != 0 && | ||||
swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) { | swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) { | ||||
/* Caller should destroy the source block. */ | /* Caller should destroy the source block. */ | ||||
return (false); | return (false); | ||||
} | } | ||||
/* | /* | ||||
* Destination has no swapblk and is not resident, transfer source. | * Destination has no swapblk and is not resident, transfer source. | ||||
* swp_pager_meta_build() can sleep. | * swp_pager_meta_build() can sleep. | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | if (destroysource) { | ||||
swp_pager_meta_free_all(srcobject); | swp_pager_meta_free_all(srcobject); | ||||
/* | /* | ||||
* Reverting the type is not necessary, the caller is going | * Reverting the type is not necessary, the caller is going | ||||
* to destroy srcobject directly, but I'm doing it here | * to destroy srcobject directly, but I'm doing it here | ||||
* for consistency since we've removed the object from its | * for consistency since we've removed the object from its | ||||
* queues. | * queues. | ||||
*/ | */ | ||||
srcobject->type = OBJT_DEFAULT; | srcobject->type = OBJT_DEFAULT; | ||||
vm_object_clear_flag(srcobject, OBJ_SWAP); | |||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* SWAP_PAGER_HASPAGE() - determine if we have good backing store for | * SWAP_PAGER_HASPAGE() - determine if we have good backing store for | ||||
* the requested page. | * the requested page. | ||||
* | * | ||||
* We determine whether good backing store exists for the requested | * We determine whether good backing store exists for the requested | ||||
* page and return TRUE if it does, FALSE if it doesn't. | * page and return TRUE if it does, FALSE if it doesn't. | ||||
* | * | ||||
* If TRUE, we also try to determine how much valid, contiguous backing | * If TRUE, we also try to determine how much valid, contiguous backing | ||||
* store exists before and after the requested page. | * store exists before and after the requested page. | ||||
*/ | */ | ||||
static boolean_t | static boolean_t | ||||
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, | swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, | ||||
int *after) | int *after) | ||||
{ | { | ||||
daddr_t blk, blk0; | daddr_t blk, blk0; | ||||
int i; | int i; | ||||
VM_OBJECT_ASSERT_LOCKED(object); | VM_OBJECT_ASSERT_LOCKED(object); | ||||
KASSERT(object->type == OBJT_SWAP, | KASSERT((object->flags & OBJ_SWAP) != 0, | ||||
("%s: object not swappable", __func__)); | ("%s: object not swappable", __func__)); | ||||
/* | /* | ||||
* do we have good backing store at the requested index ? | * do we have good backing store at the requested index ? | ||||
*/ | */ | ||||
blk0 = swp_pager_meta_lookup(object, pindex); | blk0 = swp_pager_meta_lookup(object, pindex); | ||||
if (blk0 == SWAPBLK_NONE) { | if (blk0 == SWAPBLK_NONE) { | ||||
if (before) | if (before) | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | swap_pager_unswapped(vm_page_t m) | ||||
if ((m->a.flags & PGA_SWAP_FREE) != 0) | if ((m->a.flags & PGA_SWAP_FREE) != 0) | ||||
counter_u64_add(swap_free_completed, 1); | counter_u64_add(swap_free_completed, 1); | ||||
vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE); | vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE); | ||||
/* | /* | ||||
* The meta data only exists if the object is OBJT_SWAP | * The meta data only exists if the object is OBJT_SWAP | ||||
* and even then might not be allocated yet. | * and even then might not be allocated yet. | ||||
*/ | */ | ||||
KASSERT(m->object->type == OBJT_SWAP, | KASSERT((m->object->flags & OBJ_SWAP) != 0, | ||||
("Free object not swappable")); | ("Free object not swappable")); | ||||
sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks, | sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks, | ||||
rounddown(m->pindex, SWAP_META_PAGES)); | rounddown(m->pindex, SWAP_META_PAGES)); | ||||
if (sb == NULL) | if (sb == NULL) | ||||
return; | return; | ||||
if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE) | if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE) | ||||
return; | return; | ||||
Show All 21 Lines | swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count, | ||||
vm_page_t bm, mpred, msucc, p; | vm_page_t bm, mpred, msucc, p; | ||||
vm_pindex_t pindex; | vm_pindex_t pindex; | ||||
daddr_t blk; | daddr_t blk; | ||||
int i, maxahead, maxbehind, reqcount; | int i, maxahead, maxbehind, reqcount; | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
reqcount = count; | reqcount = count; | ||||
KASSERT(object->type == OBJT_SWAP, | KASSERT((object->flags & OBJ_SWAP) != 0, | ||||
("%s: object not swappable", __func__)); | ("%s: object not swappable", __func__)); | ||||
if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) { | if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) { | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
return (VM_PAGER_FAIL); | return (VM_PAGER_FAIL); | ||||
} | } | ||||
KASSERT(reqcount - 1 <= maxahead, | KASSERT(reqcount - 1 <= maxahead, | ||||
("page count %d extends beyond swap block", reqcount)); | ("page count %d extends beyond swap block", reqcount)); | ||||
▲ Show 20 Lines • Show All 217 Lines • ▼ Show 20 Lines | KASSERT(count == 0 || ma[0]->object == object, | ||||
("%s: object mismatch %p/%p", | ("%s: object mismatch %p/%p", | ||||
__func__, object, ma[0]->object)); | __func__, object, ma[0]->object)); | ||||
/* | /* | ||||
* Step 1 | * Step 1 | ||||
* | * | ||||
* Turn object into OBJT_SWAP. Force sync if not a pageout process. | * Turn object into OBJT_SWAP. Force sync if not a pageout process. | ||||
*/ | */ | ||||
if (object->type != OBJT_SWAP) { | if ((object->flags & OBJ_SWAP) == 0) { | ||||
addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE); | addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE); | ||||
KASSERT(addr == SWAPBLK_NONE, | KASSERT(addr == SWAPBLK_NONE, | ||||
("unexpected object swap block")); | ("unexpected object swap block")); | ||||
} | } | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0; | async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0; | ||||
swp_pager_init_freerange(&s_free, &n_free); | swp_pager_init_freerange(&s_free, &n_free); | ||||
▲ Show 20 Lines • Show All 288 Lines • ▼ Show 20 Lines | |||||
swap_pager_swapped_pages(vm_object_t object) | swap_pager_swapped_pages(vm_object_t object) | ||||
{ | { | ||||
struct swblk *sb; | struct swblk *sb; | ||||
vm_pindex_t pi; | vm_pindex_t pi; | ||||
u_long res; | u_long res; | ||||
int i; | int i; | ||||
VM_OBJECT_ASSERT_LOCKED(object); | VM_OBJECT_ASSERT_LOCKED(object); | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
return (0); | return (0); | ||||
for (res = 0, pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | for (res = 0, pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | ||||
&object->un_pager.swp.swp_blks, pi)) != NULL; | &object->un_pager.swp.swp_blks, pi)) != NULL; | ||||
pi = sb->p + SWAP_META_PAGES) { | pi = sb->p + SWAP_META_PAGES) { | ||||
for (i = 0; i < SWAP_META_PAGES; i++) { | for (i = 0; i < SWAP_META_PAGES; i++) { | ||||
if (sb->d[i] != SWAPBLK_NONE) | if (sb->d[i] != SWAPBLK_NONE) | ||||
res++; | res++; | ||||
Show All 12 Lines | |||||
swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object) | swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object) | ||||
{ | { | ||||
struct swblk *sb; | struct swblk *sb; | ||||
vm_page_t m; | vm_page_t m; | ||||
vm_pindex_t pi; | vm_pindex_t pi; | ||||
daddr_t blk; | daddr_t blk; | ||||
int i, nv, rahead, rv; | int i, nv, rahead, rv; | ||||
KASSERT(object->type == OBJT_SWAP, | KASSERT((object->flags & OBJ_SWAP) != 0, | ||||
("%s: Object not swappable", __func__)); | ("%s: Object not swappable", __func__)); | ||||
for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | ||||
&object->un_pager.swp.swp_blks, pi)) != NULL; ) { | &object->un_pager.swp.swp_blks, pi)) != NULL; ) { | ||||
if ((object->flags & OBJ_DEAD) != 0) { | if ((object->flags & OBJ_DEAD) != 0) { | ||||
/* | /* | ||||
* Make sure that pending writes finish before | * Make sure that pending writes finish before | ||||
* returning. | * returning. | ||||
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | swap_pager_swapoff(struct swdevt *sp) | ||||
int retries; | int retries; | ||||
sx_assert(&swdev_syscall_lock, SA_XLOCKED); | sx_assert(&swdev_syscall_lock, SA_XLOCKED); | ||||
retries = 0; | retries = 0; | ||||
full_rescan: | full_rescan: | ||||
mtx_lock(&vm_object_list_mtx); | mtx_lock(&vm_object_list_mtx); | ||||
TAILQ_FOREACH(object, &vm_object_list, object_list) { | TAILQ_FOREACH(object, &vm_object_list, object_list) { | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
continue; | continue; | ||||
mtx_unlock(&vm_object_list_mtx); | mtx_unlock(&vm_object_list_mtx); | ||||
/* Depends on type-stability. */ | /* Depends on type-stability. */ | ||||
VM_OBJECT_WLOCK(object); | VM_OBJECT_WLOCK(object); | ||||
/* | /* | ||||
* Dead objects are eventually terminated on their own. | * Dead objects are eventually terminated on their own. | ||||
*/ | */ | ||||
if ((object->flags & OBJ_DEAD) != 0) | if ((object->flags & OBJ_DEAD) != 0) | ||||
goto next_obj; | goto next_obj; | ||||
/* | /* | ||||
* Sync with fences placed after pctrie | * Sync with fences placed after pctrie | ||||
* initialization. We must not access pctrie below | * initialization. We must not access pctrie below | ||||
* unless we checked that our object is swap and not | * unless we checked that our object is swap and not | ||||
* dead. | * dead. | ||||
*/ | */ | ||||
atomic_thread_fence_acq(); | atomic_thread_fence_acq(); | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
goto next_obj; | goto next_obj; | ||||
swap_pager_swapoff_object(sp, object); | swap_pager_swapoff_object(sp, object); | ||||
next_obj: | next_obj: | ||||
VM_OBJECT_WUNLOCK(object); | VM_OBJECT_WUNLOCK(object); | ||||
mtx_lock(&vm_object_list_mtx); | mtx_lock(&vm_object_list_mtx); | ||||
} | } | ||||
mtx_unlock(&vm_object_list_mtx); | mtx_unlock(&vm_object_list_mtx); | ||||
▲ Show 20 Lines • Show All 78 Lines • ▼ Show 20 Lines | swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk) | ||||
daddr_t prev_swapblk; | daddr_t prev_swapblk; | ||||
int error, i; | int error, i; | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
/* | /* | ||||
* Convert default object to swap object if necessary | * Convert default object to swap object if necessary | ||||
*/ | */ | ||||
if (object->type != OBJT_SWAP) { | if ((object->flags & OBJ_SWAP) == 0) { | ||||
pctrie_init(&object->un_pager.swp.swp_blks); | pctrie_init(&object->un_pager.swp.swp_blks); | ||||
/* | /* | ||||
* Ensure that swap_pager_swapoff()'s iteration over | * Ensure that swap_pager_swapoff()'s iteration over | ||||
* object_list does not see a garbage pctrie. | * object_list does not see a garbage pctrie. | ||||
*/ | */ | ||||
atomic_thread_fence_rel(); | atomic_thread_fence_rel(); | ||||
object->type = OBJT_SWAP; | object->type = OBJT_SWAP; | ||||
vm_object_set_flag(object, OBJ_SWAP); | |||||
object->un_pager.swp.writemappings = 0; | object->un_pager.swp.writemappings = 0; | ||||
KASSERT((object->flags & OBJ_ANON) != 0 || | KASSERT((object->flags & OBJ_ANON) != 0 || | ||||
object->handle == NULL, | object->handle == NULL, | ||||
("default pager %p with handle %p", | ("default pager %p with handle %p", | ||||
object, object->handle)); | object, object->handle)); | ||||
} | } | ||||
rdpi = rounddown(pindex, SWAP_META_PAGES); | rdpi = rounddown(pindex, SWAP_META_PAGES); | ||||
▲ Show 20 Lines • Show All 92 Lines • ▼ Show 20 Lines | swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject, | ||||
vm_pindex_t pindex, vm_pindex_t count) | vm_pindex_t pindex, vm_pindex_t count) | ||||
{ | { | ||||
struct swblk *sb; | struct swblk *sb; | ||||
daddr_t n_free, s_free; | daddr_t n_free, s_free; | ||||
vm_pindex_t offset, last; | vm_pindex_t offset, last; | ||||
int i, limit, start; | int i, limit, start; | ||||
VM_OBJECT_ASSERT_WLOCKED(srcobject); | VM_OBJECT_ASSERT_WLOCKED(srcobject); | ||||
if (srcobject->type != OBJT_SWAP || count == 0) | if ((srcobject->flags & OBJ_SWAP) == 0 || count == 0) | ||||
return; | return; | ||||
swp_pager_init_freerange(&s_free, &n_free); | swp_pager_init_freerange(&s_free, &n_free); | ||||
offset = pindex; | offset = pindex; | ||||
last = pindex + count; | last = pindex + count; | ||||
for (;;) { | for (;;) { | ||||
sb = SWAP_PCTRIE_LOOKUP_GE(&srcobject->un_pager.swp.swp_blks, | sb = SWAP_PCTRIE_LOOKUP_GE(&srcobject->un_pager.swp.swp_blks, | ||||
rounddown(pindex, SWAP_META_PAGES)); | rounddown(pindex, SWAP_META_PAGES)); | ||||
▲ Show 20 Lines • Show All 50 Lines • ▼ Show 20 Lines | |||||
swp_pager_meta_free_all(vm_object_t object) | swp_pager_meta_free_all(vm_object_t object) | ||||
{ | { | ||||
struct swblk *sb; | struct swblk *sb; | ||||
daddr_t n_free, s_free; | daddr_t n_free, s_free; | ||||
vm_pindex_t pindex; | vm_pindex_t pindex; | ||||
int i; | int i; | ||||
VM_OBJECT_ASSERT_WLOCKED(object); | VM_OBJECT_ASSERT_WLOCKED(object); | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
return; | return; | ||||
swp_pager_init_freerange(&s_free, &n_free); | swp_pager_init_freerange(&s_free, &n_free); | ||||
for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE( | ||||
&object->un_pager.swp.swp_blks, pindex)) != NULL;) { | &object->un_pager.swp.swp_blks, pindex)) != NULL;) { | ||||
pindex = sb->p + SWAP_META_PAGES; | pindex = sb->p + SWAP_META_PAGES; | ||||
for (i = 0; i < SWAP_META_PAGES; i++) { | for (i = 0; i < SWAP_META_PAGES; i++) { | ||||
if (sb->d[i] == SWAPBLK_NONE) | if (sb->d[i] == SWAPBLK_NONE) | ||||
Show All 23 Lines | swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex) | ||||
struct swblk *sb; | struct swblk *sb; | ||||
VM_OBJECT_ASSERT_LOCKED(object); | VM_OBJECT_ASSERT_LOCKED(object); | ||||
/* | /* | ||||
* The meta data only exists if the object is OBJT_SWAP | * The meta data only exists if the object is OBJT_SWAP | ||||
* and even then might not be allocated yet. | * and even then might not be allocated yet. | ||||
*/ | */ | ||||
KASSERT(object->type == OBJT_SWAP, | KASSERT((object->flags & OBJ_SWAP) != 0, | ||||
("Lookup object not swappable")); | ("Lookup object not swappable")); | ||||
sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, | sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, | ||||
rounddown(pindex, SWAP_META_PAGES)); | rounddown(pindex, SWAP_META_PAGES)); | ||||
if (sb == NULL) | if (sb == NULL) | ||||
return (SWAPBLK_NONE); | return (SWAPBLK_NONE); | ||||
return (sb->d[pindex % SWAP_META_PAGES]); | return (sb->d[pindex % SWAP_META_PAGES]); | ||||
} | } | ||||
/* | /* | ||||
* Returns the least page index which is greater than or equal to the | * Returns the least page index which is greater than or equal to the | ||||
* parameter pindex and for which there is a swap block allocated. | * parameter pindex and for which there is a swap block allocated. | ||||
* Returns object's size if the object's type is not swap or if there | * Returns object's size if the object's type is not swap or if there | ||||
* are no allocated swap blocks for the object after the requested | * are no allocated swap blocks for the object after the requested | ||||
* pindex. | * pindex. | ||||
*/ | */ | ||||
vm_pindex_t | vm_pindex_t | ||||
swap_pager_find_least(vm_object_t object, vm_pindex_t pindex) | swap_pager_find_least(vm_object_t object, vm_pindex_t pindex) | ||||
{ | { | ||||
struct swblk *sb; | struct swblk *sb; | ||||
int i; | int i; | ||||
VM_OBJECT_ASSERT_LOCKED(object); | VM_OBJECT_ASSERT_LOCKED(object); | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
return (object->size); | return (object->size); | ||||
sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks, | sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks, | ||||
rounddown(pindex, SWAP_META_PAGES)); | rounddown(pindex, SWAP_META_PAGES)); | ||||
if (sb == NULL) | if (sb == NULL) | ||||
return (object->size); | return (object->size); | ||||
if (sb->p < pindex) { | if (sb->p < pindex) { | ||||
for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) { | for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) { | ||||
▲ Show 20 Lines • Show All 438 Lines • ▼ Show 20 Lines | vmspace_swap_count(struct vmspace *vmspace) | ||||
map = &vmspace->vm_map; | map = &vmspace->vm_map; | ||||
count = 0; | count = 0; | ||||
VM_MAP_ENTRY_FOREACH(cur, map) { | VM_MAP_ENTRY_FOREACH(cur, map) { | ||||
if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) | if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) | ||||
continue; | continue; | ||||
object = cur->object.vm_object; | object = cur->object.vm_object; | ||||
if (object == NULL || object->type != OBJT_SWAP) | if (object == NULL || (object->flags & OBJ_SWAP) == 0) | ||||
continue; | continue; | ||||
VM_OBJECT_RLOCK(object); | VM_OBJECT_RLOCK(object); | ||||
if (object->type != OBJT_SWAP) | if ((object->flags & OBJ_SWAP) == 0) | ||||
goto unlock; | goto unlock; | ||||
pi = OFF_TO_IDX(cur->offset); | pi = OFF_TO_IDX(cur->offset); | ||||
e = pi + OFF_TO_IDX(cur->end - cur->start); | e = pi + OFF_TO_IDX(cur->end - cur->start); | ||||
for (;; pi = sb->p + SWAP_META_PAGES) { | for (;; pi = sb->p + SWAP_META_PAGES) { | ||||
sb = SWAP_PCTRIE_LOOKUP_GE( | sb = SWAP_PCTRIE_LOOKUP_GE( | ||||
&object->un_pager.swp.swp_blks, pi); | &object->un_pager.swp.swp_blks, pi); | ||||
if (sb == NULL || sb->p >= e) | if (sb == NULL || sb->p >= e) | ||||
break; | break; | ||||
/*
 * Account for the removal of a writeable mapping over the range
 * [start, end) of the object.  Counterpart of
 * swap_pager_update_writecount().
 */
static void
swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{

	VM_OBJECT_WLOCK(object);
	/* Anonymous objects are splittable and must not carry writecounts. */
	KASSERT((object->flags & OBJ_ANON) == 0,
	    ("Splittable object with writecount"));
	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
	VM_OBJECT_WUNLOCK(object);
}
/*
 * Return the tmpfs vnode backing the object in *vpp, with a hold
 * reference taken (*vp_heldp set to true) so the vnode is not freed
 * while the caller uses it.
 *
 * NOTE(review): *vpp is assigned only when vp_heldp != NULL, i.e.
 * callers passing vp_heldp == NULL receive no vnode at all — confirm
 * this is intended for all callers of pgo_getvp.
 */
static void
swap_tmpfs_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
	struct vnode *vp;

	/*
	 * Tmpfs VREG node, which was reclaimed, has OBJT_SWAP_TMPFS
	 * type, but not OBJ_TMPFS flag.  In this case there is no
	 * v_writecount to adjust.
	 */
	VM_OBJECT_RLOCK(object);
	if ((object->flags & OBJ_TMPFS) != 0) {
		vp = object->un_pager.swp.swp_tmpfs;
		if (vp != NULL && vp_heldp != NULL) {
			/* Hold while the object lock is still protecting vp. */
			vhold(vp);
			*vpp = vp;
			*vp_heldp = true;
		}
	}
	VM_OBJECT_RUNLOCK(object);
}
Inline comment (markj): I guess pagerops should be annotated __read_mostly.