diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -426,6 +426,7 @@
 #define	VM_MAP_WIRE_HOLESOK	2	/* region may have holes */
 
 #define	VM_MAP_WIRE_WRITE	4	/* Validate writable. */
+#define	VM_MAP_WIRE_PREFAULT	8	/* Pre-populate whole region. */
 
 typedef int vm_map_entry_reader(void *token, vm_map_entry_t addr,
     vm_map_entry_t dest);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3486,6 +3486,86 @@
 	entry->wired_count = -1;
 }
 
+/*
+ * vm_map_wire_prefault_entry:
+ *
+ *	Preallocates pages in the entry's object to avoid the per-page
+ *	cost of vm_fault().  Used to speed up the wiring of large regions.
+ *
+ *	Will not operate on non-empty entries.
+ */
+
+static int
+vm_map_wire_prefault_entry(vm_map_t map, vm_map_entry_t entry)
+{
+	vm_pindex_t pindex;
+	vm_offset_t cur, end;
+	vm_object_t obj = entry->object.vm_object;
+	int rv = KERN_SUCCESS;
+	vm_page_t m;
+#if VM_NRESERVLEVEL > 0
+	const size_t reserv_size = 1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT);
+#endif
+
+	VM_OBJECT_WLOCK(obj);
+	/* Check if the entry is empty. */
+	m = vm_page_find_least(obj, OFF_TO_IDX(entry->offset));
+	if (m != NULL &&
+	    m->pindex <= OFF_TO_IDX((entry->end - entry->start) + entry->offset)) {
+		VM_OBJECT_WUNLOCK(obj);
+		return (KERN_RESTART);
+	}
+	/* Populate the entry's object with pages. */
+	end = entry->end;
+	cur = entry->start;
+	while (cur < end) {
+		pindex = OFF_TO_IDX((cur - entry->start) + entry->offset);
+#if VM_NRESERVLEVEL > 0
+		/* First, try to allocate a superpage. */
+		if ((cur & (reserv_size - 1)) == 0 &&
+		    (end - cur) >= reserv_size) {
+			m = vm_page_alloc_contig(obj, pindex,
+			    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY,
+			    (1 << VM_LEVEL_0_ORDER), 0, ~0, 0, 0,
+			    VM_MEMATTR_DEFAULT);
+			if (m != NULL) {
+				cur += reserv_size;
+				continue;
+			}
+		}
+#endif
+		/* Fall back to 0-order pages. */
+		m = vm_page_alloc(obj, pindex,
+		    VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
+		if (m == NULL) {
+			VM_OBJECT_WUNLOCK(obj);
+			return (KERN_NO_SPACE);
+		}
+		/* We got a 0-order page. */
+		cur += PAGE_SIZE;
+	}
+	VM_OBJECT_WUNLOCK(obj);
+
+	/*
+	 * Simulate faults to enter the previously
+	 * allocated pages into the physical map.
+	 */
+	cur = entry->start;
+	while (cur < end) {
+		rv = vm_fault(map, cur, VM_PROT_NONE, VM_FAULT_WIRE, NULL);
+		if (rv != KERN_SUCCESS)
+			break;
+
+		VM_OBJECT_RLOCK(obj);
+		m = vm_page_lookup(obj,
+		    OFF_TO_IDX((cur - entry->start) + entry->offset));
+		VM_OBJECT_RUNLOCK(obj);
+
+		cur += pagesizes[m->psind];
+	}
+	return (rv);
+}
+
 int
 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
@@ -3595,6 +3675,13 @@
 			vm_map_busy(map);
 			vm_map_unlock(map);
 
+			if ((flags & VM_MAP_WIRE_PREFAULT) != 0 && user_wire &&
+			    !holes_ok) {
+				rv = vm_map_wire_prefault_entry(map, entry);
+				if (rv != KERN_RESTART)
+					goto fault_done;
+				/* Fall back to the regular wiring loop. */
+			}
 			for (faddr = saved_start; faddr < saved_end; faddr +=
 			    incr) {
 				/*
@@ -3606,6 +3693,7 @@
 				if (rv != KERN_SUCCESS)
 					break;
 			}
+fault_done:
 			vm_map_lock(map);
 			vm_map_unbusy(map);
 			if (last_timestamp + 1 != map->timestamp) {
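
Not part of the diff: a minimal caller-side sketch of how the new flag could be requested, under the assumption that mlock(2)-style user wirings are the intended consumers. Only vm_map_wire() and the VM_MAP_WIRE_* flags come from the tree; the helper name wire_region_prefaulted and the want_prefault knob are hypothetical.

/*
 * Hypothetical caller sketch (illustration only, not from this patch).
 * Wires [start, end) on behalf of a user process and opts into the
 * prefaulting path when want_prefault is set.  The flag combination
 * corresponds to the user_wire && !holes_ok case the patch checks for.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_map.h>

static int
wire_region_prefaulted(vm_map_t map, vm_offset_t start, vm_offset_t end,
    bool want_prefault)
{
	int flags;

	flags = VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES;
	if (want_prefault)
		flags |= VM_MAP_WIRE_PREFAULT;
	return (vm_map_wire(map, start, end, flags));
}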