Index: sys/kern/kern_sendfile.c =================================================================== --- sys/kern/kern_sendfile.c +++ sys/kern/kern_sendfile.c @@ -261,7 +261,7 @@ for (int i = 0; i < count; i++) if (pg[i] != bogus_page) - vm_page_xunbusy(pg[i]); + vm_page_xunbusy_unchecked(pg[i]); if (error) sfio->error = error; Index: sys/vm/vm_page.h =================================================================== --- sys/vm/vm_page.h +++ sys/vm/vm_page.h @@ -307,7 +307,13 @@ #define VPB_SHARERS_WORD(x) ((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED) #define VPB_ONE_SHARER (1 << VPB_SHARERS_SHIFT) -#define VPB_SINGLE_EXCLUSIVER VPB_BIT_EXCLUSIVE +#define VPB_SINGLE_EXCLUSIVE VPB_BIT_EXCLUSIVE +#ifdef INVARIANTS +#define VPB_CURTHREAD_EXCLUSIVE \ + (VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK)) +#else +#define VPB_CURTHREAD_EXCLUSIVE VPB_SINGLE_EXCLUSIVE +#endif #define VPB_UNBUSIED VPB_SHARERS_WORD(0) @@ -648,6 +654,7 @@ void vm_page_wire(vm_page_t); bool vm_page_wire_mapped(vm_page_t m); void vm_page_xunbusy_hard(vm_page_t m); +void vm_page_xunbusy_hard_unchecked(vm_page_t m); void vm_page_set_validclean (vm_page_t, int, int); void vm_page_clear_dirty(vm_page_t, int, int); void vm_page_set_invalid(vm_page_t, int, int); @@ -683,10 +690,19 @@ ("vm_page_assert_unbusied: page %p busy @ %s:%d", \ (m), __FILE__, __LINE__)) -#define vm_page_assert_xbusied(m) \ +#define vm_page_assert_xbusied_unchecked(m) do { \ KASSERT(vm_page_xbusied(m), \ ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \ - (m), __FILE__, __LINE__)) + (m), __FILE__, __LINE__)); \ +} while (0) +#define vm_page_assert_xbusied(m) do { \ + vm_page_assert_xbusied_unchecked(m); \ + KASSERT(((m)->busy_lock & ~VPB_BIT_WAITERS) == \ + VPB_CURTHREAD_EXCLUSIVE, \ + ("vm_page_assert_xbusied: page %p busy_lock %#x not owned" \ + " by me @ %s:%d", \ + (m), (m)->busy_lock, __FILE__, __LINE__)); \ +} while (0) #define vm_page_busied(m) \ ((m)->busy_lock != VPB_UNBUSIED) @@ -698,7 
+714,7 @@ } while (0) #define vm_page_xbusied(m) \ - (((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0) + (((m)->busy_lock & VPB_SINGLE_EXCLUSIVE) != 0) #define vm_page_xbusy(m) do { \ if (!vm_page_tryxbusy(m)) \ @@ -709,9 +725,14 @@ /* Note: page m's lock must not be owned by the caller. */ #define vm_page_xunbusy(m) do { \ if (!atomic_cmpset_rel_int(&(m)->busy_lock, \ - VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED)) \ + VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \ vm_page_xunbusy_hard(m); \ } while (0) +#define vm_page_xunbusy_unchecked(m) do { \ + if (!atomic_cmpset_rel_int(&(m)->busy_lock, \ + VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \ + vm_page_xunbusy_hard_unchecked(m); \ +} while (0) #ifdef INVARIANTS void vm_page_object_busy_assert(vm_page_t m); Index: sys/vm/vm_page.c =================================================================== --- sys/vm/vm_page.c +++ sys/vm/vm_page.c @@ -439,7 +439,7 @@ bzero(marker, sizeof(*marker)); marker->flags = PG_MARKER; marker->aflags = aflags; - marker->busy_lock = VPB_SINGLE_EXCLUSIVER; + marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE; marker->queue = queue; } @@ -941,18 +941,19 @@ int vm_page_busy_tryupgrade(vm_page_t m) { - u_int x; + u_int ce, x; vm_page_assert_sbusied(m); x = m->busy_lock; + ce = VPB_CURTHREAD_EXCLUSIVE; for (;;) { if (VPB_SHARERS(x) > 1) return (0); KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1), ("vm_page_busy_tryupgrade: invalid lock state")); if (!atomic_fcmpset_acq_int(&m->busy_lock, &x, - VPB_SINGLE_EXCLUSIVER | (x & VPB_BIT_WAITERS))) + ce | (x & VPB_BIT_WAITERS))) continue; return (1); } @@ -1110,7 +1111,7 @@ vm_object_t obj; if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED, - VPB_SINGLE_EXCLUSIVER) == 0) + VPB_CURTHREAD_EXCLUSIVE) == 0) return (0); obj = m->object; @@ -1121,6 +1122,14 @@ return (1); } +static void +vm_page_xunbusy_hard_tail(vm_page_t m) +{ + atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED); + /* Wake the waiter. 
*/ + wakeup(m); +} + /* * vm_page_xunbusy_hard: * @@ -1129,14 +1138,16 @@ void vm_page_xunbusy_hard(vm_page_t m) { vm_page_assert_xbusied(m); + vm_page_xunbusy_hard_tail(m); +} - /* - * Wake the waiter. - */ - atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED); - wakeup(m); +void +vm_page_xunbusy_hard_unchecked(vm_page_t m) +{ + vm_page_assert_xbusied_unchecked(m); + vm_page_xunbusy_hard_tail(m); } /* @@ -1230,7 +1240,7 @@ m->flags = PG_FICTITIOUS; /* Fictitious pages don't use "order" or "pool". */ m->oflags = VPO_UNMANAGED; - m->busy_lock = VPB_SINGLE_EXCLUSIVER; + m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; /* Fictitious pages are unevictable. */ m->ref_count = 1; pmap_page_init(m); @@ -1320,7 +1330,7 @@ else vm_page_deactivate(m); vm_page_unlock(m); - vm_page_xunbusy(m); + vm_page_xunbusy_unchecked(m); } /* @@ -1962,7 +1972,7 @@ VPO_UNMANAGED : 0; m->busy_lock = VPB_UNBUSIED; if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) - m->busy_lock = VPB_SINGLE_EXCLUSIVER; + m->busy_lock = VPB_CURTHREAD_EXCLUSIVE; if ((req & VM_ALLOC_SBUSY) != 0) m->busy_lock = VPB_SHARERS_WORD(1); if (req & VM_ALLOC_WIRED) { @@ -2156,7 +2166,7 @@ VPO_UNMANAGED : 0; busy_lock = VPB_UNBUSIED; if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0) - busy_lock = VPB_SINGLE_EXCLUSIVER; + busy_lock = VPB_CURTHREAD_EXCLUSIVE; if ((req & VM_ALLOC_SBUSY) != 0) busy_lock = VPB_SHARERS_WORD(1); if ((req & VM_ALLOC_WIRED) != 0)