diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -85,6 +85,7 @@
 #include
 #include
 #include
+#include <sys/pctrie.h>
 #include
 #include
 #include
@@ -220,6 +221,21 @@
 	}
 }
 
+/*
+ * Return true if a vm_pager_get_pages() call is needed in order to check
+ * whether the pager might have a particular page, false if it can be determined
+ * immediately that the pager can not have a copy.  For swap objects, this can
+ * be checked quickly.
+ */
+static inline bool
+fault_object_needs_getpages(vm_object_t object)
+{
+	VM_OBJECT_ASSERT_LOCKED(object);
+
+	return ((object->flags & OBJ_SWAP) == 0 ||
+	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
+}
+
 static inline void
 unlock_map(struct faultstate *fs)
 {
@@ -1406,10 +1422,9 @@
 	/*
 	 * Page is not resident.  If the pager might contain the page
 	 * or this is the beginning of the search, allocate a new
-	 * page.  (Default objects are zero-fill, so there is no real
-	 * pager for them.)
+	 * page.
 	 */
-	if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
+	if (fs->m == NULL && (fault_object_needs_getpages(fs->object) ||
 	    fs->object == fs->first_object)) {
 		res = vm_fault_allocate(fs);
 		if (res != FAULT_CONTINUE)
@@ -1422,7 +1437,7 @@
 	 * object without dropping the lock to preserve atomicity of
 	 * shadow faults.
 	 */
-	if (fs->object->type != OBJT_DEFAULT) {
+	if (fault_object_needs_getpages(fs->object)) {
 		/*
 		 * At this point, we have either allocated a new page
 		 * or found an existing page that is only partially
@@ -1841,7 +1856,7 @@
 		if (!obj_locked)
 			VM_OBJECT_RLOCK(lobject);
 		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
-		    lobject->type == OBJT_DEFAULT &&
+		    !fault_object_needs_getpages(lobject) &&
 		    (backing_object = lobject->backing_object) != NULL) {
 			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
 			    0, ("vm_fault_prefault: unaligned object offset"));
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -202,7 +202,8 @@
 #define	OBJ_SIZEVNLOCK	0x0040		/* lock vnode to check obj size */
 #define	OBJ_PG_DTOR	0x0080		/* dont reset object, leave that for dtor */
 #define	OBJ_SHADOWLIST	0x0100		/* Object is on the shadow list. */
-#define	OBJ_SWAP	0x0200		/* object swaps */
+#define	OBJ_SWAP	0x0200		/* object swaps, type will be OBJT_SWAP
+					   or dynamically registered */
 #define	OBJ_SPLIT	0x0400		/* object is being split */
 #define	OBJ_COLLAPSING	0x0800		/* Parent of collapse. */
 #define	OBJ_COLORED	0x1000		/* pg_color is defined */
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -244,8 +244,10 @@
 
 	object->type = type;
 	object->flags = flags;
-	if ((flags & OBJ_SWAP) != 0)
+	if ((flags & OBJ_SWAP) != 0) {
 		pctrie_init(&object->un_pager.swp.swp_blks);
+		object->un_pager.swp.writemappings = 0;
+	}
 
 	/*
 	 * Ensure that swap_pager_swapoff() iteration over object_list
@@ -473,8 +475,8 @@
 	else
 		handle = backing_object;
 	object = uma_zalloc(obj_zone, M_WAITOK);
-	_vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
-	    object, handle);
+	_vm_object_allocate(OBJT_SWAP, size,
+	    OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
 	object->cred = cred;
 	object->charge = cred != NULL ? charge : 0;
 	return (object);
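
Reviewer note on the new helper: fault_object_needs_getpages() replaces the old
"type != OBJT_DEFAULT" tests with a property of the object itself. Any non-swap
object must be asked via vm_pager_get_pages(), while a swap-backed anonymous
object needs a pager call only once its swp_blks pctrie holds at least one swap
block; a never-swapped anonymous object is still zero-filled, exactly as
OBJT_DEFAULT objects were. The userland sketch below is illustration only, under
the assumption that modeling the pctrie as a single "empty" flag is enough to
show the truth table; model_object, swap_trie_empty, and model_needs_getpages
are invented names, not kernel interfaces.

/*
 * Userland model of fault_object_needs_getpages(); illustration only.
 * "swap_trie_empty" stands in for pctrie_is_empty(&obj->un_pager.swp.swp_blks).
 */
#include <stdbool.h>
#include <stdio.h>

#define	MODEL_OBJ_SWAP	0x0200		/* mirrors OBJ_SWAP */

struct model_object {
	unsigned int flags;		/* OBJ_* flags */
	bool swap_trie_empty;		/* no blocks written to swap yet */
};

/* True when a vm_pager_get_pages() call would be needed for this object. */
static bool
model_needs_getpages(const struct model_object *obj)
{
	return ((obj->flags & MODEL_OBJ_SWAP) == 0 || !obj->swap_trie_empty);
}

int
main(void)
{
	/* Non-swap (e.g. vnode-backed) object: always ask the pager. */
	struct model_object vnode_backed = { 0, true };
	/* Fresh anonymous object, nothing in swap: zero-fill, skip the pager. */
	struct model_object fresh_anon = { MODEL_OBJ_SWAP, true };
	/* Anonymous object with blocks on swap: the pager must be consulted. */
	struct model_object swapped_anon = { MODEL_OBJ_SWAP, false };

	printf("vnode-backed: %d\n", model_needs_getpages(&vnode_backed));
	printf("fresh anon:   %d\n", model_needs_getpages(&fresh_anon));
	printf("swapped anon: %d\n", model_needs_getpages(&swapped_anon));
	return (0);
}

This should print 1, 0, 1; the middle case is the one where the old code relied
on OBJT_DEFAULT and the new code relies on an empty swp_blks trie.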