D49331.diff
lockmgr/rmlock/rwlock/sx: Make various assertions more robust
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1325,8 +1325,8 @@
("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
__func__, file, line));
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
- ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
- lk->lock_object.lo_name, file, line));
+ ("%s: idle thread %p on lockmgr %p @ %s:%d", __func__, curthread,
+ lk, file, line));
class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -598,8 +598,8 @@
return;
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
- ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
- curthread, rm->lock_object.lo_name, file, line));
+ ("rm_wlock() by idle thread %p on rmlock %p @ %s:%d",
+ curthread, rm, file, line));
KASSERT(!rm_destroyed(rm),
("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
_rm_assert(rm, RA_UNLOCKED, file, line);
@@ -643,14 +643,14 @@
critical_enter();
KASSERT(rm_trackers_present(get_pcpu(), rm,
curthread) == 0,
- ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
- rm->lock_object.lo_name, file, line));
+ ("rm_rlock: recursed on non-recursive rmlock %p @ %s:%d\n",
+ rm, file, line));
critical_exit();
}
#endif
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
- ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
- curthread, rm->lock_object.lo_name, file, line));
+ ("rm_rlock() by idle thread %p on rmlock %p @ %s:%d",
+ curthread, rm, file, line));
KASSERT(!rm_destroyed(rm),
("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
if (!trylock) {
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -292,8 +292,8 @@
KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
!TD_IS_IDLETHREAD(curthread),
- ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
- curthread, rw->lock_object.lo_name, file, line));
+ ("rw_wlock() by idle thread %p on rwlock %p @ %s:%d",
+ curthread, rw, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -325,8 +325,8 @@
return (1);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
- ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
- curthread, rw->lock_object.lo_name, file, line));
+ ("rw_try_wlock() by idle thread %p on rwlock %p @ %s:%d",
+ curthread, rw, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -681,13 +681,13 @@
KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
!TD_IS_IDLETHREAD(td),
- ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
- td, rw->lock_object.lo_name, file, line));
+ ("rw_rlock() by idle thread %p on rwlock %p @ %s:%d",
+ td, rw, file, line));
KASSERT(rw->rw_lock != RW_DESTROYED,
("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
KASSERT(rw_wowner(rw) != td,
- ("rw_rlock: wlock already held for %s @ %s:%d",
- rw->lock_object.lo_name, file, line));
+ ("rw_rlock: wlock already held for %p @ %s:%d",
+ rw, file, line));
WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
v = RW_READ_VALUE(rw);
@@ -721,8 +721,8 @@
return (1);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
- ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
- curthread, rw->lock_object.lo_name, file, line));
+ ("rw_try_rlock() by idle thread %p on rwlock %p @ %s:%d",
+ curthread, rw, file, line));
x = rw->rw_lock;
for (;;) {
@@ -970,8 +970,8 @@
if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
- ("%s: recursing but non-recursive rw %s @ %s:%d\n",
- __func__, rw->lock_object.lo_name, file, line));
+ ("%s: recursing but non-recursive rw %p @ %s:%d\n",
+ __func__, rw, file, line));
rw->rw_recurse++;
atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
if (LOCK_LOG_TEST(&rw->lock_object, 0))
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -278,8 +278,8 @@
return (1);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
- ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
- curthread, sx->lock_object.lo_name, file, line));
+ ("sx_try_slock() by idle thread %p on sx %p @ %s:%d",
+ curthread, sx, file, line));
x = sx->sx_lock;
for (;;) {
@@ -317,8 +317,8 @@
KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
!TD_IS_IDLETHREAD(curthread),
- ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
- curthread, sx->lock_object.lo_name, file, line));
+ ("sx_xlock() by idle thread %p on sx %p @ %s:%d",
+ curthread, sx, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xlock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
@@ -354,8 +354,8 @@
return (1);
KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
- ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
- curthread, sx->lock_object.lo_name, file, line));
+ ("sx_try_xlock() by idle thread %p on sx %p @ %s:%d",
+ curthread, sx, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -617,8 +617,8 @@
/* If we already hold an exclusive lock, then recurse. */
if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
- ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
- sx->lock_object.lo_name, file, line));
+ ("_sx_xlock_hard: recursed on non-recursive sx %p @ %s:%d\n",
+ sx, file, line));
sx->sx_recurse++;
atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1264,8 +1264,8 @@
KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
!TD_IS_IDLETHREAD(curthread),
- ("sx_slock() by idle thread %p on sx %s @ %s:%d",
- curthread, sx->lock_object.lo_name, file, line));
+ ("sx_slock() by idle thread %p on sx %p @ %s:%d",
+ curthread, sx, file, line));
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_slock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
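Every hunk above applies the same pattern: the KASSERT panic message stops formatting the lock's lo_name string with %s and instead formats the lock pointer itself with %p. Below is a minimal userland sketch of that pattern. It assumes (an inference from the review title, not stated in the diff itself) that the point of the change is to avoid dereferencing a possibly corrupted or uninitialized lock structure while the assertion message is being built, since formatting the raw pointer value never touches the memory it points to. The toy_lock structure, TOY_ASSERT macro, and toy_wlock function are hypothetical names invented for this illustration and are not FreeBSD kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

struct toy_lock {
	const char *lo_name;	/* may point to garbage if the lock is corrupted */
	int locked;
};

#define	TOY_ASSERT(cond, ...) do {					\
	if (!(cond)) {							\
		fprintf(stderr, __VA_ARGS__);				\
		abort();						\
	}								\
} while (0)

static void
toy_wlock(struct toy_lock *lk, const char *file, int line)
{
	/*
	 * Old style: the message arguments dereference lk to fetch lo_name,
	 * so a trashed lock structure can fault again inside the assertion:
	 *
	 *	TOY_ASSERT(lk->locked == 0,
	 *	    "recursed on non-recursive lock %s @ %s:%d\n",
	 *	    lk->lo_name, file, line);
	 *
	 * New style (what the diff switches to): only the pointer value is
	 * formatted; the lock is never dereferenced while building the text.
	 */
	TOY_ASSERT(lk->locked == 0,
	    "recursed on non-recursive lock %p @ %s:%d\n",
	    (void *)lk, file, line);
	lk->locked = 1;
}

int
main(void)
{
	struct toy_lock lk = { .lo_name = "example", .locked = 0 };

	toy_wlock(&lk, __FILE__, __LINE__);	/* first acquisition passes the assert */
	toy_wlock(&lk, __FILE__, __LINE__);	/* second acquisition trips it */
	return (0);
}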