Index: head/sys/vm/vm.h
===================================================================
--- head/sys/vm/vm.h
+++ head/sys/vm/vm.h
@@ -80,7 +80,9 @@
 #define	VM_PROT_WRITE		((vm_prot_t) 0x02)
 #define	VM_PROT_EXECUTE		((vm_prot_t) 0x04)
 #define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
-#define	VM_PROT_FAULT_LOOKUP	((vm_prot_t) 0x010)
+#define	VM_PROT_PRIV_FLAG	((vm_prot_t) 0x10)
+#define	VM_PROT_FAULT_LOOKUP	VM_PROT_PRIV_FLAG
+#define	VM_PROT_QUICK_NOFAULT	VM_PROT_PRIV_FLAG	/* same to save bits */
 
 #define	VM_PROT_ALL		(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
 #define	VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)
Index: head/sys/vm/vm_fault.c
===================================================================
--- head/sys/vm/vm_fault.c
+++ head/sys/vm/vm_fault.c
@@ -1524,7 +1524,18 @@
 	 * page was mapped at the specified virtual address or that
 	 * mapping had insufficient permissions.  Attempt to fault in
 	 * and hold these pages.
+	 *
+	 * If vm_fault_disable_pagefaults() was called,
+	 * i.e., TDP_NOFAULTING is set, we must not sleep nor
+	 * acquire MD VM locks, which means we must not call
+	 * vm_fault_hold().  Some (out of tree) callers already
+	 * mark too wide a code area with
+	 * vm_fault_disable_pagefaults(); use the
+	 * VM_PROT_QUICK_NOFAULT flag to request the proper
+	 * behaviour explicitly.
 	 */
+	if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
+	    (curthread->td_pflags & TDP_NOFAULTING) != 0)
+		goto error;
 	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
 		if (*mp == NULL && vm_fault_hold(map, va, prot,
 		    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
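
For illustration, below is a minimal caller sketch showing how the new flag is meant to be used: a caller that runs with page faults disabled passes VM_PROT_QUICK_NOFAULT so that vm_fault_quick_hold_pages() fails fast instead of reaching vm_fault_hold(), which could sleep or take VM locks. The helper name copy_held_pages_nofault() and its parameters are made up for this example; vm_fault_quick_hold_pages(), vm_fault_disable_pagefaults()/vm_fault_enable_pagefaults(), and vm_page_unhold_pages() are existing FreeBSD KPIs, with vm_fault_quick_hold_pages() returning the number of held pages or -1 on error.

	#include <sys/param.h>
	#include <sys/errno.h>
	#include <sys/systm.h>

	#include <vm/vm.h>
	#include <vm/vm_extern.h>
	#include <vm/vm_page.h>

	/*
	 * Hypothetical caller: hold the user pages backing [uaddr, uaddr + len)
	 * without ever sleeping in the fault handler.
	 */
	static int
	copy_held_pages_nofault(vm_map_t map, vm_offset_t uaddr, vm_size_t len,
	    vm_page_t *ma, int max_pages)
	{
		int count, save;

		/* Set TDP_NOFAULTING for the current thread. */
		save = vm_fault_disable_pagefaults();

		/*
		 * With VM_PROT_QUICK_NOFAULT, the new check in
		 * vm_fault_quick_hold_pages() bails out (returns -1) for any
		 * page that is not already resident and mapped, instead of
		 * calling vm_fault_hold().
		 */
		count = vm_fault_quick_hold_pages(map, uaddr, len,
		    VM_PROT_READ | VM_PROT_QUICK_NOFAULT, ma, max_pages);
		vm_fault_enable_pagefaults(save);
		if (count == -1)
			return (EFAULT);

		/* ... use the held pages here, then release them ... */
		vm_page_unhold_pages(ma, count);
		return (0);
	}

Note that the flag is only honoured when TDP_NOFAULTING is actually set; callers that have not disabled page faults still get the normal vm_fault_hold() slow path.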