D51162: vm_fault: lookup page ranges for copy_entry
D51162.diff (9 KB)
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -2101,8 +2101,9 @@
vm_pindex_t dst_pindex, pindex, src_pindex;
vm_prot_t access, prot;
vm_offset_t vaddr;
- vm_page_t dst_m;
- vm_page_t src_m;
+ vm_page_t dst_m, src_m;
+ vm_page_t ma[256];
+ int i, nread, npages;
bool upgrade;
upgrade = src_entry == dst_entry;
@@ -2122,9 +2123,12 @@
access = prot = dst_entry->protection;
if (!upgrade)
access &= ~VM_PROT_WRITE;
+ else
+ access |= PMAP_ENTER_WIRED;
src_object = src_entry->object.vm_object;
src_pindex = OFF_TO_IDX(src_entry->offset);
+ npages = atop(dst_entry->end - dst_entry->start);
if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
dst_object = src_object;
@@ -2135,8 +2139,7 @@
* Doesn't actually shadow anything - we copy the pages
* directly.
*/
- dst_object = vm_object_allocate_anon(atop(dst_entry->end -
- dst_entry->start), NULL, NULL, 0);
+ dst_object = vm_object_allocate_anon(npages, NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
dst_object->flags |= OBJ_COLORED;
dst_object->pg_color = atop(dst_entry->start);
@@ -2174,20 +2177,18 @@
* regardless of whether they can be written.
*/
vm_page_iter_init(&pages, dst_object);
- for (vaddr = dst_entry->start, dst_pindex = 0;
- vaddr < dst_entry->end;
- vaddr += PAGE_SIZE, dst_pindex++) {
-again:
+ for (dst_pindex = 0; dst_pindex < npages;) {
/*
* Find the page in the source object, and copy it in.
* Because the source is wired down, the page will be
* in memory.
*/
- if (src_object != dst_object)
- VM_OBJECT_RLOCK(src_object);
object = src_object;
+ if (object != dst_object)
+ VM_OBJECT_RLOCK(object);
pindex = src_pindex + dst_pindex;
- while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
+ while ((nread = vm_page_lookup_range(object, pindex,
+ ma, MIN(nitems(ma), npages - dst_pindex))) == 0 &&
(backing_object = object->backing_object) != NULL) {
/*
* Unless the source mapping is read-only or
@@ -2208,7 +2209,7 @@
VM_OBJECT_RUNLOCK(object);
object = backing_object;
}
- KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));
+ KASSERT(nread != 0, ("vm_fault_copy_entry: page missing"));
if (object != dst_object) {
/*
@@ -2216,85 +2217,98 @@
*/
pindex = (src_object == dst_object ? src_pindex : 0) +
dst_pindex;
- dst_m = vm_page_alloc_iter(dst_object, pindex,
- VM_ALLOC_NORMAL, &pages);
- if (dst_m == NULL) {
- VM_OBJECT_WUNLOCK(dst_object);
- VM_OBJECT_RUNLOCK(object);
- vm_wait(dst_object);
- VM_OBJECT_WLOCK(dst_object);
- pctrie_iter_reset(&pages);
- goto again;
- }
+ for (i = 0; i < nread; i++) {
+ dst_m = vm_page_alloc_iter(dst_object,
+ pindex + i, VM_ALLOC_NORMAL, &pages);
+ if (dst_m == NULL) {
+ VM_OBJECT_WUNLOCK(dst_object);
+ vm_wait(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
+ pctrie_iter_reset(&pages);
+ break;
+ }
- /*
- * See the comment in vm_fault_cow().
- */
- if (src_object == dst_object &&
- (object->flags & OBJ_ONEMAPPING) == 0)
- pmap_remove_all(src_m);
- pmap_copy_page(src_m, dst_m);
+ /*
+ * See the comment in vm_fault_cow().
+ */
+ src_m = ma[i];
+ if (src_object == dst_object &&
+ (object->flags & OBJ_ONEMAPPING) == 0)
+ pmap_remove_all(src_m);
+ pmap_copy_page(src_m, dst_m);
- /*
- * The object lock does not guarantee that "src_m" will
- * transition from invalid to valid, but it does ensure
- * that "src_m" will not transition from valid to
- * invalid.
- */
- dst_m->dirty = dst_m->valid = src_m->valid;
- VM_OBJECT_RUNLOCK(object);
- } else {
- dst_m = src_m;
- if (vm_page_busy_acquire(
- dst_m, VM_ALLOC_WAITFAIL) == 0) {
- pctrie_iter_reset(&pages);
- goto again;
- }
- if (dst_m->pindex >= dst_object->size) {
/*
- * We are upgrading. Index can occur
- * out of bounds if the object type is
- * vnode and the file was truncated.
+ * The object lock does not guarantee that
+ * "src_m" will transition from invalid to
+ * valid, but it does ensure that "src_m" will
+ * not transition from valid to invalid.
*/
- vm_page_xunbusy(dst_m);
- break;
+ dst_m->dirty = dst_m->valid = src_m->valid;
+ if (upgrade)
+ vm_page_unwire(src_m, PQ_INACTIVE);
+ ma[i] = dst_m;
+ }
+ VM_OBJECT_RUNLOCK(object);
+ } else {
+ for (i = 0; i < nread; i++) {
+ dst_m = ma[i];
+ if (vm_page_busy_acquire(
+ dst_m, VM_ALLOC_WAITFAIL) == 0) {
+ pctrie_iter_reset(&pages);
+ break;
+ }
+ if (dst_m->pindex >= dst_object->size) {
+ /*
+ * We are upgrading. Index can occur
+ * out of bounds if the object type is
+ * vnode and the file was truncated.
+ */
+ vm_page_xunbusy(dst_m);
+ break;
+ }
}
}
+ nread = i;
+ for (i = 0; i < nread; i++) {
+ dst_m = ma[i];
- /*
- * Enter it in the pmap. If a wired, copy-on-write
- * mapping is being replaced by a write-enabled
- * mapping, then wire that new mapping.
- *
- * The page can be invalid if the user called
- * msync(MS_INVALIDATE) or truncated the backing vnode
- * or shared memory object. In this case, do not
- * insert it into pmap, but still do the copy so that
- * all copies of the wired map entry have similar
- * backing pages.
- */
- if (vm_page_all_valid(dst_m)) {
+ /*
+ * Enter it in the pmap. If a wired, copy-on-write
+ * mapping is being replaced by a write-enabled mapping,
+ * then wire that new mapping.
+ *
+ * The page can be invalid if the user called
+ * msync(MS_INVALIDATE) or truncated the backing vnode
+ * or shared memory object. In this case, do not insert
+ * it into pmap, but still do the copy so that all
+ * copies of the wired map entry have similar backing
+ * pages.
+ */
+ if (!vm_page_all_valid(dst_m))
+ continue;
VM_OBJECT_WUNLOCK(dst_object);
+ vaddr = dst_entry->start + ptoa(dst_pindex + i);
pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
- access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
+ access, 0);
VM_OBJECT_WLOCK(dst_object);
- }
- /*
- * Mark it no longer busy, and put it on the active list.
- */
- if (upgrade) {
- if (src_m != dst_m) {
- vm_page_unwire(src_m, PQ_INACTIVE);
- vm_page_wire(dst_m);
+ /*
+ * Mark it no longer busy, and put it on the active
+ * list.
+ */
+ if (upgrade) {
+ if (object != dst_object)
+ vm_page_wire(dst_m);
+ else {
+ KASSERT(vm_page_wired(dst_m),
+ ("dst_m %p is not wired", dst_m));
+ }
} else {
- KASSERT(vm_page_wired(dst_m),
- ("dst_m %p is not wired", dst_m));
+ vm_page_activate(dst_m);
}
- } else {
- vm_page_activate(dst_m);
+ vm_page_xunbusy(dst_m);
}
- vm_page_xunbusy(dst_m);
+ dst_pindex += nread;
}
VM_OBJECT_WUNLOCK(dst_object);
if (upgrade) {
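
Note on the rewritten loop above: the per-page "again:" retry is replaced by a
batched pattern. Each pass looks up a run of up to nitems(ma) consecutive
resident pages (ma[256], i.e. up to 1 MiB per batch with 4 KiB pages),
processes as many of them as it can, truncates the run at the first allocation
or busy-acquire failure, and resumes at the first unprocessed index. The
stand-alone sketch below shows only that control flow; lookup_range() and
process() are hypothetical stand-ins for vm_page_lookup_range() and the
per-page copy logic, not kernel APIs.

#include <stdio.h>

#define NITEMS(x)       (sizeof(x) / sizeof((x)[0]))
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Pretend every index in [base, base + count) is resident. */
static int
lookup_range(int base, int *ma, int count)
{
        for (int i = 0; i < count; i++)
                ma[i] = base + i;
        return (count);
}

/* Fail once per page divisible by 7, mimicking a transient vm_wait(). */
static int
process(int page)
{
        static int retried = -1;

        if (page % 7 == 0 && retried != page) {
                retried = page;
                return (0);
        }
        printf("copied page %d\n", page);
        return (1);
}

int
main(void)
{
        int ma[4], npages = 10, dst_pindex, nread, i;

        for (dst_pindex = 0; dst_pindex < npages;) {
                nread = lookup_range(dst_pindex, ma,
                    MIN((int)NITEMS(ma), npages - dst_pindex));
                for (i = 0; i < nread; i++) {
                        if (!process(ma[i]))
                                break;  /* Truncate the run here. */
                }
                nread = i;              /* Only the first i pages succeeded. */
                dst_pindex += nread;    /* Resume at first unprocessed page. */
        }
        return (0);
}

Because a failed page is retried on the next pass rather than via goto,
forward progress relies on the failure being transient, as the vm_wait() and
VM_ALLOC_WAITFAIL paths provide in the patch itself.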
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -618,6 +618,8 @@
vm_object_t new_object, vm_pindex_t new_pindex);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
+int vm_page_lookup_range(vm_object_t, vm_pindex_t index,
+ vm_page_t ma[], int count);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -1752,6 +1752,22 @@
return (vm_radix_lookup(&object->rtree, pindex));
}
+/*
+ * vm_page_lookup_range:
+ *
+ * Returns the number of pages read into ma, beginning at the given index.
+ *
+ * The object must be locked.
+ */
+int
+vm_page_lookup_range(vm_object_t object, vm_pindex_t pindex,
+ vm_page_t ma[], int count)
+{
+
+ VM_OBJECT_ASSERT_LOCKED(object);
+ return (vm_radix_lookup_range(&object->rtree, pindex, ma, count));
+}
+
/*
* vm_page_iter_init:
*
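
As described, vm_page_lookup_range() fills ma[] with the run of consecutively
resident pages starting at the given index and returns the run's length; a
return of 0 means no page is resident at that index (the vm_fault.c caller
above then falls back to the backing object). A hedged illustration of a
possible caller follows; count_resident() is a hypothetical helper, not part
of this patch.

/*
 * Hypothetical helper (illustration only): count the resident pages in
 * [start, end) using the batched lookup, in runs of up to 16.
 */
static int
count_resident(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
        vm_page_t ma[16];
        vm_pindex_t pindex;
        int n, total;

        total = 0;
        VM_OBJECT_RLOCK(object);
        for (pindex = start; pindex < end;) {
                n = vm_page_lookup_range(object, pindex, ma,
                    MIN(nitems(ma), end - pindex));
                if (n == 0) {
                        pindex++;       /* Hole at pindex; skip one index. */
                        continue;
                }
                total += n;             /* ma[0 .. n-1] are resident. */
                pindex += n;
        }
        VM_OBJECT_RUNLOCK(object);
        return (total);
}

The patch asserts only VM_OBJECT_ASSERT_LOCKED(), so the read lock taken here
is sufficient.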
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -101,6 +101,20 @@
return (VM_RADIX_PCTRIE_LOOKUP_UNLOCKED(&rtree->rt_trie, index));
}
+/*
+ * Returns the number of contiguous, non-NULL pages read into the ma[]
+ * array.
+ *
+ * Requires that access be externally synchronized by a lock.
+ */
+static __inline int
+vm_radix_lookup_range(struct vm_radix *rtree, vm_pindex_t index,
+ vm_page_t ma[], int count)
+{
+ return (VM_RADIX_PCTRIE_LOOKUP_RANGE(&rtree->rt_trie, index,
+ ma, count));
+}
+
/*
* Returns the number of contiguous, non-NULL pages read into the ma[]
* array, without requiring an external lock.
@@ -115,7 +129,9 @@
/*
* Returns the number of contiguous, non-NULL pages read into the ma[]
- * array, without requiring an external lock.
+ * array.
+ *
+ * Requires that access be externally synchronized by a lock.
*/
static __inline int
vm_radix_iter_lookup_range(struct pctrie_iter *pages, vm_pindex_t index,
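
With this hunk, vm_radix.h offers the range lookup in both a root-based and an
iterator-based locked form, alongside the earlier unlocked variant whose
comment appears in the context above. A sketch of the two locked forms under
assumptions mirroring the vm_fault.c usage; the note about iterator behavior
reflects the general pctrie_iter design rather than anything stated in this
diff.

        /* Illustration only: both forms assume the object lock is held. */
        struct pctrie_iter pages;
        vm_page_t ma[16];
        int n;

        /* Direct form: one descent from the trie root per call. */
        n = vm_radix_lookup_range(&object->rtree, pindex, ma, nitems(ma));

        /*
         * Iterator form: the iterator keeps its position, so successive
         * calls for nearby indices can avoid re-walking from the root.
         */
        vm_page_iter_init(&pages, object);
        n = vm_radix_iter_lookup_range(&pages, pindex, ma, nitems(ma));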