Page MenuHomeFreeBSD

D25753.id74779.diff
No One · Temporary

D25753.id74779.diff

Index: head/sys/kern/kern_lock.c
===================================================================
--- head/sys/kern/kern_lock.c
+++ head/sys/kern/kern_lock.c
@@ -167,6 +167,12 @@
#endif
};
+static __read_mostly bool lk_adaptive = true;
+static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
+SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
+ 0, "");
+#define lockmgr_delay locks_delay
+
struct lockmgr_wait {
const char *iwmesg;
int ipri;
@@ -515,7 +521,6 @@
* waiters, if we fail to acquire the shared lock
* loop back and retry.
*/
- *xp = lockmgr_read_value(lk);
while (LK_CAN_SHARE(*xp, flags, fp)) {
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
*xp + LK_ONE_SHARER)) {
@@ -541,6 +546,38 @@
return (false);
}
+/*
+ * Adaptive spinning for shared lock acquisition.
+ *
+ * Called when the fast-path shared acquire failed.  Spin (via lock_delay())
+ * while the lock is held exclusive, on the theory that the owner will
+ * release it soon.  Returns true with *xp refreshed when the lock has
+ * become shareable, so the caller can loop back and retry the fast path;
+ * returns false when spinning is pointless and the caller should fall
+ * through to blocking on the sleep queue.
+ */
+static bool
+lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
+    int flags)
+{
+	struct thread *owner;
+	uintptr_t x;
+
+	x = *xp;
+	MPASS(x != LK_UNLOCKED);
+	owner = (struct thread *)LK_HOLDER(x);
+	for (;;) {
+		MPASS(owner != curthread);
+		/*
+		 * LK_KERNPROC is a placeholder owner, not a real thread; we
+		 * cannot tell whether it is running, so do not spin on it.
+		 */
+		if (owner == (struct thread *)LK_KERNPROC)
+			return (false);
+		/*
+		 * Lock is held shared yet the fast path failed (e.g. share
+		 * denied by flags); no exclusive owner to spin on.
+		 */
+		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
+			return (false);
+		if (owner == NULL)
+			return (false);
+		/*
+		 * Only spin while the owner is on a CPU; if it is off-CPU
+		 * the wait could be unbounded, so block instead.
+		 */
+		if (!TD_IS_RUNNING(owner))
+			return (false);
+		/* Queued waiters present: go join the sleep queue. */
+		if ((x & LK_ALL_WAITERS) != 0)
+			return (false);
+		lock_delay(lda);
+		x = lockmgr_read_value(lk);
+		if (LK_CAN_SHARE(x, flags, false)) {
+			/* Publish the fresh lock word for the retry. */
+			*xp = x;
+			return (true);
+		}
+		owner = (struct thread *)LK_HOLDER(x);
+	}
+}
+
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *file, int line, struct lockmgr_wait *lwa)
@@ -557,6 +594,7 @@
uint64_t waittime = 0;
int contested = 0;
#endif
+ struct lock_delay_arg lda;
if (KERNEL_PANICKED())
goto out;
@@ -566,9 +604,31 @@
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, flags & LK_INTERLOCK ? ilk : NULL);
+ lock_delay_arg_init(&lda, &lockmgr_delay);
+ if (!lk_adaptive)
+ flags &= ~LK_ADAPTIVE;
+ x = lockmgr_read_value(lk);
+ /*
+ * The lock may already be locked exclusive by curthread,
+ * avoid deadlock.
+ */
+ if (LK_HOLDER(x) == tid) {
+ LOCK_LOG2(lk,
+ "%s: %p already held in exclusive mode",
+ __func__, lk);
+ error = EDEADLK;
+ goto out;
+ }
+
for (;;) {
if (lockmgr_slock_try(lk, &x, flags, false))
break;
+
+ if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
+ if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
+ continue;
+ }
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -576,18 +636,6 @@
&contested, &waittime);
/*
- * If the lock is already held by curthread in
- * exclusive way avoid a deadlock.
- */
- if (LK_HOLDER(x) == tid) {
- LOCK_LOG2(lk,
- "%s: %p already held in exclusive mode",
- __func__, lk);
- error = EDEADLK;
- break;
- }
-
- /*
* If the lock is expected to not sleep just give up
* and return.
*/
@@ -660,6 +708,7 @@
}
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
__func__, lk);
+ x = lockmgr_read_value(lk);
}
if (error == 0) {
#ifdef KDTRACE_HOOKS
@@ -682,6 +731,37 @@
return (error);
}
+/*
+ * Adaptive spinning for exclusive lock acquisition.
+ *
+ * Called when the fast-path exclusive acquire failed.  Spin (via
+ * lock_delay()) while the current exclusive owner is running on a CPU.
+ * Returns true with *xp refreshed when the lock has been observed
+ * unlocked, so the caller can loop back and retry the cmpset; returns
+ * false when spinning is pointless and the caller should fall through
+ * to blocking on the sleep queue.
+ */
+static bool
+lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
+{
+	struct thread *owner;
+	uintptr_t x;
+
+	x = *xp;
+	MPASS(x != LK_UNLOCKED);
+	owner = (struct thread *)LK_HOLDER(x);
+	for (;;) {
+		MPASS(owner != curthread);
+		if (owner == NULL)
+			return (false);
+		/* Held shared: no single owner thread to spin on. */
+		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
+			return (false);
+		/*
+		 * LK_KERNPROC is a placeholder owner, not a real thread; we
+		 * cannot tell whether it is running, so do not spin on it.
+		 */
+		if (owner == (struct thread *)LK_KERNPROC)
+			return (false);
+		/*
+		 * Only spin while the owner is on a CPU; otherwise the wait
+		 * could be unbounded, so block instead.
+		 */
+		if (!TD_IS_RUNNING(owner))
+			return (false);
+		/* Queued waiters present: go join the sleep queue. */
+		if ((x & LK_ALL_WAITERS) != 0)
+			return (false);
+		lock_delay(lda);
+		x = lockmgr_read_value(lk);
+		if (x == LK_UNLOCKED) {
+			/* Publish the fresh (unlocked) word for the retry. */
+			*xp = x;
+			return (true);
+		}
+		owner = (struct thread *)LK_HOLDER(x);
+	}
+}
+
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *file, int line, struct lockmgr_wait *lwa)
@@ -699,6 +779,7 @@
uint64_t waittime = 0;
int contested = 0;
#endif
+ struct lock_delay_arg lda;
if (KERNEL_PANICKED())
goto out;
@@ -747,10 +828,19 @@
goto out;
}
+ x = LK_UNLOCKED;
+ lock_delay_arg_init(&lda, &lockmgr_delay);
+ if (!lk_adaptive)
+ flags &= ~LK_ADAPTIVE;
for (;;) {
- if (lk->lk_lock == LK_UNLOCKED &&
- atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
- break;
+ if (x == LK_UNLOCKED) {
+ if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
+ break;
+ }
+ if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
+ if (lockmgr_xlock_adaptive(&lda, lk, &x))
+ continue;
+ }
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -853,6 +943,7 @@
}
LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
__func__, lk);
+ x = lockmgr_read_value(lk);
}
if (error == 0) {
#ifdef KDTRACE_HOOKS
@@ -954,6 +1045,7 @@
file, line, flags & LK_INTERLOCK ? ilk : NULL);
if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
break;
+ x = lockmgr_read_value(lk);
if (lockmgr_slock_try(lk, &x, flags, true)) {
lockmgr_note_shared_acquire(lk, 0, 0,
file, line, flags);
@@ -1139,12 +1231,13 @@
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, NULL);
+ x = lockmgr_read_value(lk);
if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
return (0);
}
- return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
+ return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}
int
@@ -1165,7 +1258,7 @@
return (0);
}
- return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
+ return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}
int
Index: head/sys/sys/lockmgr.h
===================================================================
--- head/sys/sys/lockmgr.h
+++ head/sys/sys/lockmgr.h
@@ -170,6 +170,7 @@
#define LK_SLEEPFAIL 0x000800
#define LK_TIMELOCK 0x001000
#define LK_NODDLKTREAT 0x002000
+#define LK_ADAPTIVE 0x004000
/*
* Operations for lockmgr().
Index: head/sys/ufs/ffs/ffs_vnops.c
===================================================================
--- head/sys/ufs/ffs/ffs_vnops.c
+++ head/sys/ufs/ffs/ffs_vnops.c
@@ -445,6 +445,7 @@
struct lock *lkp;
int result;
+ ap->a_flags |= LK_ADAPTIVE;
switch (ap->a_flags & LK_TYPE_MASK) {
case LK_SHARED:
case LK_UPGRADE:
@@ -482,6 +483,7 @@
}
return (result);
#else
+ ap->a_flags |= LK_ADAPTIVE;
return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

File Metadata

Mime Type
text/plain
Expires
Tue, Nov 25, 2:41 PM (19 h, 47 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
26131326
Default Alt Text
D25753.id74779.diff (6 KB)

Event Timeline