D35785.id108099.diff

Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -85,6 +85,7 @@
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
+#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
@@ -220,6 +221,22 @@
}
}
+/*
+ * Return true if a vm_pager_get_pages() call is needed in order to check
+ * whether the pager might have a particular page, false if it can be determined
+ * immediately that the pager does not have a copy. For swap objects, this can
+ * be checked quickly.
+ */
+static inline bool
+fault_object_needs_getpages(vm_object_t object)
+{
+ VM_OBJECT_ASSERT_LOCKED(object);
+
+ return ((object->flags & (OBJ_ANON | OBJ_SWAP)) !=
+ (OBJ_ANON | OBJ_SWAP) ||
+ !pctrie_is_empty(&object->un_pager.swp.swp_blks));
+}
+
static inline void
unlock_map(struct faultstate *fs)
{
@@ -1416,10 +1433,9 @@
/*
* Page is not resident. If the pager might contain the page
* or this is the beginning of the search, allocate a new
- * page. (Default objects are zero-fill, so there is no real
- * pager for them.)
+ * page.
*/
- if (fs->m == NULL && (fs->object->type != OBJT_DEFAULT ||
+ if (fs->m == NULL && (fault_object_needs_getpages(fs->object) ||
fs->object == fs->first_object)) {
res = vm_fault_allocate(fs);
if (res != FAULT_CONTINUE)
@@ -1432,7 +1448,7 @@
* object without dropping the lock to preserve atomicity of
* shadow faults.
*/
- if (fs->object->type != OBJT_DEFAULT) {
+ if (fault_object_needs_getpages(fs->object)) {
/*
* At this point, we have either allocated a new page
* or found an existing page that is only partially
@@ -1851,7 +1867,7 @@
if (!obj_locked)
VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- lobject->type == OBJT_DEFAULT &&
+ !fault_object_needs_getpages(lobject) &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h
+++ sys/vm/vm_object.h
@@ -202,7 +202,8 @@
#define OBJ_SIZEVNLOCK 0x0040 /* lock vnode to check obj size */
#define OBJ_PG_DTOR 0x0080 /* dont reset object, leave that for dtor */
#define OBJ_SHADOWLIST 0x0100 /* Object is on the shadow list. */
-#define OBJ_SWAP 0x0200 /* object swaps */
+#define OBJ_SWAP 0x0200 /* object swaps, type will be OBJT_SWAP
+ or dynamically registered */
#define OBJ_SPLIT 0x0400 /* object is being split */
#define OBJ_COLLAPSING 0x0800 /* Parent of collapse. */
#define OBJ_COLORED 0x1000 /* pg_color is defined */
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -244,8 +244,10 @@
object->type = type;
object->flags = flags;
- if ((flags & OBJ_SWAP) != 0)
+ if ((flags & OBJ_SWAP) != 0) {
pctrie_init(&object->un_pager.swp.swp_blks);
+ object->un_pager.swp.writemappings = 0;
+ }
/*
* Ensure that swap_pager_swapoff() iteration over object_list
@@ -473,8 +475,8 @@
else
handle = backing_object;
object = uma_zalloc(obj_zone, M_WAITOK);
- _vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
- object, handle);
+ _vm_object_allocate(OBJT_SWAP, size,
+ OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
object->cred = cred;
object->charge = cred != NULL ? charge : 0;
return (object);
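The vm_object.c hunks convert plain anonymous memory from OBJT_DEFAULT to OBJT_SWAP: vm_object_allocate_anon() now passes OBJT_SWAP together with the OBJ_SWAP flag, and _vm_object_allocate() initializes the swap-block trie and the writemappings counter for every OBJ_SWAP object, so the fault path can inspect them immediately. Below is a userspace sketch of that allocation-side change; all types, flag values and function names are placeholders invented for illustration, not the real vm_object implementation.

/*
 * Userspace sketch (not kernel code) of the allocation-side change; the
 * FAKE_* names and values are made up for this note.
 */
#include <stdio.h>

#define FAKE_OBJ_ANON       0x0008 /* placeholder bits */
#define FAKE_OBJ_ONEMAPPING 0x0004
#define FAKE_OBJ_SWAP       0x0200

enum fake_objtype { FAKE_OBJT_SWAP = 1 };

struct sketch_object {
        enum fake_objtype type;
        int flags;
        void *swap_blocks;   /* stands in for the swp_blks pctrie root */
        int writemappings;   /* stands in for un_pager.swp.writemappings */
};

/*
 * Mirrors the _vm_object_allocate() hunk: swap state is initialized whenever
 * OBJ_SWAP is passed, which now includes plain anonymous memory.
 */
static void
sketch_object_allocate(struct sketch_object *obj, enum fake_objtype type,
    int flags)
{
        obj->type = type;
        obj->flags = flags;
        if ((flags & FAKE_OBJ_SWAP) != 0) {
                obj->swap_blocks = NULL; /* pctrie_init() analogue */
                obj->writemappings = 0;
        }
}

int
main(void)
{
        struct sketch_object anon;

        /* vm_object_allocate_anon() now passes OBJT_SWAP plus OBJ_SWAP. */
        sketch_object_allocate(&anon, FAKE_OBJT_SWAP,
            FAKE_OBJ_ANON | FAKE_OBJ_ONEMAPPING | FAKE_OBJ_SWAP);
        printf("anon object: type=%d, swap blocks present=%d\n",
            (int)anon.type, anon.swap_blocks != NULL);
        return (0);
}

With both pieces in place, an anonymous object no longer needs a separate OBJT_DEFAULT state: it starts life as an OBJT_SWAP object with an empty swp_blks trie and only becomes pager-backed once swap blocks are actually assigned to it.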
