Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -235,6 +235,11 @@
     &trace_guest_exceptions, 0,
     "Trap into hypervisor on all guest exceptions and reflect them back");
 
+static int apply_user_wire_limit;
+SYSCTL_INT(_hw_vmm, OID_AUTO, apply_user_wire_limit, CTLFLAG_RWTUN,
+    &apply_user_wire_limit, 0,
+    "When starting a wired VM, apply the vm.max_user_wired limit");
+
 static void vm_free_memmap(struct vm *vm, int ident);
 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
@@ -745,7 +750,8 @@
 
 	if (flags & VM_MEMMAP_F_WIRED) {
 		error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
-		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES |
+		    (apply_user_wire_limit ? 0 : VM_MAP_WIRE_USER_NOLIM));
 		if (error != KERN_SUCCESS) {
 			vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
 			return (EFAULT);
Index: sys/vm/vm_map.h
===================================================================
--- sys/vm/vm_map.h
+++ sys/vm/vm_map.h
@@ -384,12 +384,13 @@
  * vm_map_wire and vm_map_unwire option flags
  */
 #define	VM_MAP_WIRE_SYSTEM	0	/* wiring in a kernel map */
-#define	VM_MAP_WIRE_USER	1	/* wiring in a user map */
+#define	VM_MAP_WIRE_USER	0x01	/* wiring in a user map */
 
 #define	VM_MAP_WIRE_NOHOLES	0	/* region must not have holes */
-#define	VM_MAP_WIRE_HOLESOK	2	/* region may have holes */
+#define	VM_MAP_WIRE_HOLESOK	0x02	/* region may have holes */
 
-#define	VM_MAP_WIRE_WRITE	4	/* Validate writable. */
+#define	VM_MAP_WIRE_WRITE	0x04	/* Validate writable. */
+#define	VM_MAP_WIRE_USER_NOLIM	0x08	/* Ignore max_user_wired threshold. */
 #ifdef _KERNEL
 boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
     vm_prot_t);
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -2923,6 +2923,30 @@
 	entry->wired_count = -1;
 }
 
+static bool
+vm_map_wire_user_count_adj(vm_offset_t start, vm_offset_t end, int flags)
+{
+	u_long npages, wired;
+
+	if ((flags & VM_MAP_WIRE_USER) == 0)
+		return (true);
+
+	npages = atop(end - start);
+	if ((flags & VM_MAP_WIRE_USER_NOLIM) != 0) {
+		atomic_add_long(&vm_user_wire_count, npages);
+		return (true);
+	}
+
+	wired = vm_user_wire_count;
+	do {
+		if (npages + wired > vm_page_max_user_wired) {
+			return (false);
+		}
+	} while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
+	    npages + wired));
+	return (true);
+}
+
 /*
  * vm_map_wire:
  *
@@ -2934,7 +2958,6 @@
 {
 	vm_map_entry_t entry, first_entry, tmp_entry;
 	vm_offset_t faddr, saved_end, saved_start;
-	u_long size, wired;
 	u_int last_timestamp;
 	int rv;
 	boolean_t need_wakeup, result, user_wire;
@@ -3031,19 +3054,11 @@
 
 		saved_start = entry->start;
 		saved_end = entry->end;
-		if (user_wire) {
-			size = atop(saved_end - saved_start);
-			wired = vm_user_wire_count;
-			do {
-				if (size + wired >
-				    vm_page_max_user_wired) {
-					end = saved_start;
-					rv = KERN_RESOURCE_SHORTAGE;
-					goto done;
-				}
-			} while (!atomic_fcmpset_long(
-			    &vm_user_wire_count, &wired,
-			    size + wired));
+		if (!vm_map_wire_user_count_adj(saved_start, saved_end,
+		    flags)) {
+			end = saved_start;
+			rv = KERN_RESOURCE_SHORTAGE;
+			goto done;
 		}
 
 		/*