Page Menu · Home · FreeBSD

D15122.id41956.diff
No One · Temporary

D15122.id41956.diff

Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -132,7 +132,7 @@
static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
- int backward, int forward);
+ int backward, int forward, bool obj_locked);
static inline void
release_page(struct faultstate *fs)
@@ -166,11 +166,9 @@
}
static void
-unlock_and_deallocate(struct faultstate *fs)
+unlock_and_deallocate2(struct faultstate *fs)
{
- vm_object_pip_wakeup(fs->object);
- VM_OBJECT_WUNLOCK(fs->object);
if (fs->object != fs->first_object) {
VM_OBJECT_WLOCK(fs->first_object);
vm_page_lock(fs->first_m);
@@ -185,6 +183,15 @@
unlock_vp(fs);
}
+static void
+unlock_and_deallocate(struct faultstate *fs)
+{
+
+ vm_object_pip_wakeup(fs->object);
+ VM_OBJECT_WUNLOCK(fs->object);
+ unlock_and_deallocate2(fs);
+}
+
static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
vm_prot_t fault_type, int fault_flags, bool set_wd)
@@ -320,9 +327,9 @@
return (rv);
vm_fault_fill_hold(m_hold, m);
vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
- VM_OBJECT_RUNLOCK(fs->first_object);
if (psind == 0 && !wired)
- vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
+ vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
+ VM_OBJECT_RUNLOCK(fs->first_object);
vm_map_lookup_done(fs->map, fs->entry);
curthread->td_ru.ru_minflt++;
return (KERN_SUCCESS);
@@ -1248,6 +1255,7 @@
*/
KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
("vm_fault: page %p partially invalid", fs.m));
+ vm_object_pip_wakeup(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
/*
@@ -1262,8 +1270,8 @@
wired == 0)
vm_fault_prefault(&fs, vaddr,
faultcount > 0 ? behind : PFBAK,
- faultcount > 0 ? ahead : PFFOR);
- VM_OBJECT_WLOCK(fs.object);
+ faultcount > 0 ? ahead : PFFOR, false);
+
vm_page_lock(fs.m);
/*
@@ -1279,13 +1287,13 @@
*m_hold = fs.m;
vm_page_hold(fs.m);
}
+ vm_page_xunbusy_maybelocked(fs.m);
vm_page_unlock(fs.m);
- vm_page_xunbusy(fs.m);
/*
* Unlock everything, and return
*/
- unlock_and_deallocate(&fs);
+ unlock_and_deallocate2(&fs);
if (hardfault) {
VM_CNT_INC(v_io_faults);
curthread->td_ru.ru_majflt++;
@@ -1395,7 +1403,7 @@
*/
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
- int backward, int forward)
+ int backward, int forward, bool obj_locked)
{
pmap_t pmap;
vm_map_entry_t entry;
@@ -1441,7 +1449,8 @@
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = entry->object.vm_object;
- VM_OBJECT_RLOCK(lobject);
+ if (!obj_locked)
+ VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
@@ -1449,17 +1458,20 @@
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
VM_OBJECT_RLOCK(backing_object);
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
lobject = backing_object;
}
if (m == NULL) {
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
- VM_OBJECT_RUNLOCK(lobject);
+ if (!obj_locked || lobject != entry->object.vm_object)
+ VM_OBJECT_RUNLOCK(lobject);
}
}
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -724,6 +724,7 @@
VM_OBJECT_ASSERT_WLOCKED(object);
+restart:
mtx = NULL;
/*
@@ -733,13 +734,19 @@
* the object, the page and object are reset to any empty state.
*/
TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
- vm_page_assert_unbusied(p);
if ((object->flags & OBJ_UNMANAGED) == 0)
/*
* vm_page_free_prep() only needs the page
* lock for managed pages.
*/
vm_page_change_lock(p, &mtx);
+ if (vm_page_busied(p)) {
+ vm_page_change_lock(p, &mtx);
+ VM_OBJECT_WUNLOCK(object);
+ vm_page_busy_sleep(p, "termbu", false);
+ VM_OBJECT_WLOCK(object);
+ goto restart;
+ }
p->object = NULL;
if (p->wire_count != 0)
continue;
@@ -1491,7 +1498,6 @@
backing_object = object->backing_object;
VM_OBJECT_ASSERT_WLOCKED(backing_object);
- KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
KASSERT(p == NULL || p->object == object || p->object == backing_object,
("invalid ownership %p %p %p", p, object, backing_object));
if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -377,6 +377,7 @@
* Page flags. If changed at any other time than page allocation or
* freeing, the modification must be protected by the vm_page lock.
*/
+#define PG_FREEPREP 0x0001 /* vm_page_free_prep() completed */
#define PG_FICTITIOUS 0x0004 /* physical page doesn't exist */
#define PG_ZERO 0x0008 /* page is zeroed */
#define PG_MARKER 0x0010 /* special queue marker page */
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -3434,6 +3434,8 @@
vm_page_lock_assert(m, MA_OWNED);
KASSERT(!pmap_page_is_mapped(m),
("vm_page_free_prep: freeing mapped page %p", m));
+ if ((m->flags & PG_FREEPREP) != 0)
+ return (true);
} else
KASSERT(m->queue == PQ_NONE,
("vm_page_free_prep: unmanaged page %p is queued", m));
@@ -3483,6 +3485,9 @@
if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ m->flags |= PG_FREEPREP;
+
#if VM_NRESERVLEVEL > 0
if (vm_reserv_free_page(m))
return (false);

File Metadata

Mime Type
text/plain
Expires
Fri, Nov 28, 8:49 AM (6 h, 44 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
26279190
Default Alt Text
D15122.id41956.diff (6 KB)

Event Timeline