Index: sys/kern/kern_conf.c =================================================================== --- sys/kern/kern_conf.c +++ sys/kern/kern_conf.c @@ -866,11 +866,13 @@ { struct cdev *dev; va_list ap; +#ifdef INVARIANTS int res; +#endif va_start(ap, fmt); - res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt, - ap); + DBGSET(res, make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt, + ap)); va_end(ap); KASSERT(res == 0 && dev != NULL, ("make_dev: failed make_dev_credv (error=%d)", res)); @@ -883,10 +885,12 @@ { struct cdev *dev; va_list ap; +#ifdef INVARIANTS int res; +#endif va_start(ap, fmt); - res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap); + DBGSET(res, make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap)); va_end(ap); KASSERT(res == 0 && dev != NULL, @@ -996,10 +1000,11 @@ { struct cdev *dev; va_list ap; +#ifdef INVARIANTS int res; - +#endif va_start(ap, fmt); - res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap); + DBGSET(res, make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap)); va_end(ap); KASSERT(res == 0 && dev != NULL, Index: sys/kern/kern_cpuset.c =================================================================== --- sys/kern/kern_cpuset.c +++ sys/kern/kern_cpuset.c @@ -1385,8 +1385,10 @@ cpuset_thread0(void) { struct cpuset *set; - int error; int i; +#ifdef INVARIANTS + int error; +#endif cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); @@ -1411,14 +1413,14 @@ * Now derive a default (1), modifiable set from that to give out. */ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); - error = _cpuset_create(set, cpuset_zero, NULL, NULL, 1); + DBGSET(error, _cpuset_create(set, cpuset_zero, NULL, NULL, 1)); KASSERT(error == 0, ("Error creating default set: %d\n", error)); cpuset_default = set; /* * Create the kernel set (2). 
*/ set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); - error = _cpuset_create(set, cpuset_zero, NULL, NULL, 2); + DBGSET(error, _cpuset_create(set, cpuset_zero, NULL, NULL, 2)); KASSERT(error == 0, ("Error creating kernel set: %d\n", error)); set->cs_domain = &domainset2; cpuset_kernel = set; Index: sys/kern/kern_descrip.c =================================================================== --- sys/kern/kern_descrip.c +++ sys/kern/kern_descrip.c @@ -2625,9 +2625,9 @@ struct file **fpp, seq_t *seqp) { #ifdef CAPABILITIES - struct filedescent *fde; + const struct filedescent *fde; #endif - struct fdescenttbl *fdt; + const struct fdescenttbl *fdt; struct file *fp; u_int count; #ifdef CAPABILITIES @@ -2673,7 +2673,7 @@ * table before this fd was closed, so it possible that * there is a stale fp pointer in cached version. */ - fdt = *(struct fdescenttbl * volatile *)&(fdp->fd_files); + fdt = *(const struct fdescenttbl * const volatile *)&(fdp->fd_files); continue; } /* Index: sys/kern/kern_intr.c =================================================================== --- sys/kern/kern_intr.c +++ sys/kern/kern_intr.c @@ -949,7 +949,6 @@ struct intr_thread *it; struct thread *td; struct thread *ctd; - struct proc *p; /* * If no ithread or no handlers, then we have a stray interrupt. 
@@ -961,7 +960,6 @@ ctd = curthread; it = ie->ie_thread; td = it->it_thread; - p = td->td_proc; /* * If any of the handlers for this ithread claim to be good @@ -973,7 +971,7 @@ random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT); } - KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); + KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name)); /* * Set it_need to tell the thread to keep running if it is already @@ -1215,7 +1213,9 @@ struct intr_handler *ih = (struct intr_handler *)cookie; struct intr_event *ie = ih->ih_event; struct intr_entropy entropy; +#ifdef INVARIANTS int error; +#endif CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, ih->ih_need); @@ -1234,9 +1234,9 @@ if (!(flags & SWI_DELAY)) { VM_CNT_INC(v_soft); #ifdef INTR_FILTER - error = intr_event_schedule_thread(ie, ie->ie_thread); + DBGSET(error, intr_event_schedule_thread(ie, ie->ie_thread)); #else - error = intr_event_schedule_thread(ie); + DBGSET(error, intr_event_schedule_thread(ie)); #endif KASSERT(error == 0, ("stray software interrupt")); } @@ -1474,7 +1474,7 @@ struct intr_handler *ih; struct trapframe *oldframe; struct thread *td; - int error, ret, thread; + int ret, thread; td = curthread; @@ -1547,7 +1547,7 @@ /* Schedule the ithread if needed. 
*/ if (thread) { - error = intr_event_schedule_thread(ie); + DBGSET(int error, intr_event_schedule_thread(ie)); KASSERT(error == 0, ("bad stray interrupt")); } critical_exit(); Index: sys/kern/kern_lockf.c =================================================================== --- sys/kern/kern_lockf.c +++ sys/kern/kern_lockf.c @@ -2235,8 +2235,11 @@ { struct owner_edge *e; struct owner_vertex_list deltaF, deltaB; - int nF, nB, n, vi, i; + int nF, n, vi, i; int *indices; +#ifdef LOCKF_DEBUG + int nB; +#endif sx_assert(&lf_owner_graph_lock, SX_XLOCKED); @@ -2300,9 +2303,9 @@ printf("deltaF = "); graph_print_vertices(&deltaF); } + nB = #endif - - nB = graph_delta_backward(g, x, y, &deltaB); + graph_delta_backward(g, x, y, &deltaB); #ifdef LOCKF_DEBUG if (lockf_debug & 8) { Index: sys/kern/kern_sendfile.c =================================================================== --- sys/kern/kern_sendfile.c +++ sys/kern/kern_sendfile.c @@ -341,8 +341,10 @@ } for (int i = 0; i < npages;) { - int j, a, count, rv; - + int j, a, count; +#ifdef INVARIANTS + int rv; +#endif /* Skip valid pages. */ if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK, xfsize(i, npages, off, len))) { @@ -401,9 +403,9 @@ } refcount_acquire(&sfio->nios); - rv = vm_pager_get_pages_async(obj, pa + i, count, NULL, + DBGSET(rv, vm_pager_get_pages_async(obj, pa + i, count, NULL, i + count == npages ? 
&rhpages : NULL, - &sendfile_iodone, sfio); + &sendfile_iodone, sfio)); KASSERT(rv == VM_PAGER_OK, ("%s: pager fail obj %p page %p", __func__, obj, pa[i])); @@ -688,6 +690,7 @@ if (space == 0) { sfio = NULL; nios = 0; + npages = 0; goto prepend_header; } hdr_uio = NULL; Index: sys/kern/kern_umtx.c =================================================================== --- sys/kern/kern_umtx.c +++ sys/kern/kern_umtx.c @@ -662,11 +662,9 @@ static int umtxq_count(struct umtx_key *key) { - struct umtxq_chain *uc; struct umtxq_queue *uh; - uc = umtxq_getchain(key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); if (uh != NULL) return (uh->length); @@ -680,12 +678,10 @@ static int umtxq_count_pi(struct umtx_key *key, struct umtx_q **first) { - struct umtxq_chain *uc; struct umtxq_queue *uh; *first = NULL; - uc = umtxq_getchain(key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); if (uh != NULL) { *first = TAILQ_FIRST(&uh->head); @@ -727,14 +723,12 @@ static int umtxq_signal_queue(struct umtx_key *key, int n_wake, int q) { - struct umtxq_chain *uc; struct umtxq_queue *uh; struct umtx_q *uq; int ret; ret = 0; - uc = umtxq_getchain(key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); uh = umtxq_queue_lookup(key, q); if (uh != NULL) { while ((uq = TAILQ_FIRST(&uh->head)) != NULL) { @@ -754,10 +748,8 @@ static inline void umtxq_signal_thread(struct umtx_q *uq) { - struct umtxq_chain *uc; - uc = umtxq_getchain(&uq->uq_key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key)); umtxq_remove(uq); wakeup(uq); } @@ -1663,16 +1655,18 @@ umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner, const char *wmesg, struct abs_timeout *timo, bool shared) { - struct umtxq_chain *uc; struct thread *td, *td1; struct umtx_q *uq1; int error, pri; +#ifdef INVARIANTS + struct umtxq_chain *uc; 
+ uc = umtxq_getchain(&uq->uq_key); +#endif error = 0; td = uq->uq_thread; KASSERT(td == curthread, ("inconsistent uq_thread")); - uc = umtxq_getchain(&uq->uq_key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key)); KASSERT(uc->uc_busy != 0, ("umtx chain is not busy")); umtxq_insert(uq); mtx_lock(&umtx_lock); @@ -1728,10 +1722,8 @@ static void umtx_pi_ref(struct umtx_pi *pi) { - struct umtxq_chain *uc; - uc = umtxq_getchain(&pi->pi_key); - UMTXQ_LOCKED_ASSERT(uc); + UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key)); pi->pi_refcount++; } Index: sys/kern/subr_capability.c =================================================================== --- sys/kern/subr_capability.c +++ sys/kern/subr_capability.c @@ -49,6 +49,7 @@ #include #include #include +#define INVARIANTS #endif #ifdef _KERNEL @@ -108,12 +109,17 @@ cap_rights_vset(cap_rights_t *rights, va_list ap) { uint64_t right; - int i, n; + int i; +#ifdef INVARIANTS + int n; +#endif assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00); +#ifdef INVARIANTS n = CAPARSIZE(rights); assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX); +#endif for (;;) { right = (uint64_t)va_arg(ap, unsigned long long); @@ -133,13 +139,15 @@ cap_rights_vclear(cap_rights_t *rights, va_list ap) { uint64_t right; - int i, n; + int i; +#ifdef INVARIANTS + int n; assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00); n = CAPARSIZE(rights); assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX); - +#endif for (;;) { right = (uint64_t)va_arg(ap, unsigned long long); if (right == 0) @@ -158,12 +166,14 @@ cap_rights_is_vset(const cap_rights_t *rights, va_list ap) { uint64_t right; - int i, n; + int i; +#ifdef INVARIANTS + int n; assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00); - n = CAPARSIZE(rights); assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX); +#endif for (;;) { right = (uint64_t)va_arg(ap, unsigned long long); @@ -194,13 +204,11 @@ cap_rights_t * __cap_rights_init(int version, cap_rights_t *rights, ...) 
{ - unsigned int n; va_list ap; assert(version == CAP_RIGHTS_VERSION_00); - n = version + 2; - assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX); + assert(version >= CAPARSIZE_MIN - 2 && version <= CAPARSIZE_MAX - 2); CAP_NONE(rights); va_start(ap, rights); cap_rights_vset(rights, ap); @@ -339,7 +347,6 @@ n = CAPARSIZE(big); assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX); - for (i = 0; i < n; i++) { if ((big->cr_rights[i] & little->cr_rights[i]) != little->cr_rights[i]) { Index: sys/kern/subr_sleepqueue.c =================================================================== --- sys/kern/subr_sleepqueue.c +++ sys/kern/subr_sleepqueue.c @@ -384,12 +384,14 @@ sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr, int flags) { - struct sleepqueue_chain *sc; struct thread *td; sbintime_t pr1; +#ifdef INVARIANTS + struct sleepqueue_chain *sc; - td = curthread; sc = SC_LOOKUP(wchan); +#endif + td = curthread; mtx_assert(&sc->sc_lock, MA_OWNED); MPASS(TD_ON_SLEEPQ(td)); MPASS(td->td_sleepqueue == NULL); @@ -780,6 +782,7 @@ static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri) { +#ifdef INVARIANTS struct sleepqueue_chain *sc; MPASS(td != NULL); @@ -789,6 +792,7 @@ THREAD_LOCK_ASSERT(td, MA_OWNED); sc = SC_LOOKUP(sq->sq_wchan); mtx_assert(&sc->sc_lock, MA_OWNED); +#endif SDT_PROBE2(sched, , , wakeup, td, td->td_proc); @@ -974,7 +978,6 @@ static void sleepq_timeout(void *arg) { - struct sleepqueue_chain *sc; struct sleepqueue *sq; struct thread *td; void *wchan; @@ -996,9 +999,13 @@ * See if the thread is asleep and get the wait * channel if it is. 
*/ - wchan = td->td_wchan; - sc = SC_LOOKUP(wchan); +#ifdef INVARIANTS + struct sleepqueue_chain *sc; + + sc = SC_LOOKUP(td->td_wchan); THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock); +#endif + wchan = td->td_wchan; sq = sleepq_lookup(wchan); MPASS(sq != NULL); td->td_flags |= TDF_TIMEOUT; Index: sys/kern/subr_turnstile.c =================================================================== --- sys/kern/subr_turnstile.c +++ sys/kern/subr_turnstile.c @@ -804,7 +804,6 @@ int turnstile_signal(struct turnstile *ts, int queue) { - struct turnstile_chain *tc; struct thread *td; int empty; @@ -833,9 +832,13 @@ empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) && TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]); if (empty) { +#if defined(INVARIANTS) || defined(TURNSTILE_PROFILING) + struct turnstile_chain *tc; + tc = TC_LOOKUP(ts->ts_lockobj); mtx_assert(&tc->tc_lock, MA_OWNED); MPASS(LIST_EMPTY(&ts->ts_free)); +#endif #ifdef TURNSTILE_PROFILING tc->tc_depth--; #endif @@ -855,21 +858,24 @@ void turnstile_broadcast(struct turnstile *ts, int queue) { - struct turnstile_chain *tc; struct turnstile *ts1; struct thread *td; +#ifdef INVARIANTS + struct turnstile_chain *tc; - MPASS(ts != NULL); - mtx_assert(&ts->ts_lock, MA_OWNED); - MPASS(curthread->td_proc->p_magic == P_MAGIC); - MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL); /* * We must have the chain locked so that we can remove the empty * turnstile from the hash queue. */ + MPASS(ts != NULL); tc = TC_LOOKUP(ts->ts_lockobj); mtx_assert(&tc->tc_lock, MA_OWNED); + + mtx_assert(&ts->ts_lock, MA_OWNED); + MPASS(curthread->td_proc->p_magic == P_MAGIC); + MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL); MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE); +#endif /* * Transfer the blocked list to the pending list. 
Index: sys/kern/sys_capability.c =================================================================== --- sys/kern/sys_capability.c +++ sys/kern/sys_capability.c @@ -183,7 +183,7 @@ * Convert capability rights into VM access flags. */ u_char -cap_rights_to_vmprot(cap_rights_t *havep) +cap_rights_to_vmprot(const cap_rights_t *havep) { u_char maxprot; @@ -204,14 +204,14 @@ * this one file. */ -cap_rights_t * -cap_rights_fde(struct filedescent *fdep) +const cap_rights_t * +cap_rights_fde(const struct filedescent *fdep) { return (&fdep->fde_rights); } -cap_rights_t * +const cap_rights_t * cap_rights(struct filedesc *fdp, int fd) { Index: sys/kern/vfs_bio.c =================================================================== --- sys/kern/vfs_bio.c +++ sys/kern/vfs_bio.c @@ -1851,10 +1851,8 @@ static void bd_init(struct bufdomain *bd) { - int domain; int i; - domain = bd - bdomain; bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1]; bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock"); bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock"); @@ -2843,16 +2841,19 @@ vm_ooffset_t foff; vm_page_t m; vm_object_t obj; - struct vnode *vp; int i, iosize, resid; bool bogus; +#ifdef INVARIANTS + struct vnode *vp; + + vp = bp->b_vp; +#endif obj = bp->b_bufobj->bo_object; KASSERT(obj->paging_in_progress >= bp->b_npages, ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)", obj->paging_in_progress, bp->b_npages)); - vp = bp->b_vp; KASSERT(vp->v_holdcnt > 0, ("vfs_vmio_iodone: vnode %p has zero hold count", vp)); KASSERT(vp->v_object != NULL, @@ -5014,14 +5015,15 @@ void bufstrategy(struct bufobj *bo, struct buf *bp) { - int i = 0; struct vnode *vp; - +#ifdef INVARIANTS + int i = 0; +#endif vp = bp->b_vp; KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy")); KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); - i = VOP_STRATEGY(vp, bp); + DBGSET(i, VOP_STRATEGY(vp, bp)); KASSERT(i == 0, ("VOP_STRATEGY 
failed bp=%p vp=%p", bp, bp->b_vp)); } Index: sys/kern/vfs_lookup.c =================================================================== --- sys/kern/vfs_lookup.c +++ sys/kern/vfs_lookup.c @@ -92,16 +92,20 @@ crossmp_vop_lock1(struct vop_lock1_args *ap) { struct vnode *vp; + int flags; +#if defined(WITNESS) || defined(KTR) struct lock *lk; const char *file; - int flags, line; + int line; - vp = ap->a_vp; - lk = vp->v_vnlock; - flags = ap->a_flags; + lk = ap->a_vp->v_vnlock; file = ap->a_file; line = ap->a_line; +#endif + vp = ap->a_vp; + flags = ap->a_flags; + if ((flags & LK_SHARED) == 0) panic("invalid lock request for crossmp"); @@ -110,7 +114,7 @@ WITNESS_LOCK(&lk->lock_object, 0, file, line); if ((flags & LK_INTERLOCK) != 0) VI_UNLOCK(vp); - LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, ap->a_file, line); + LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line); return (0); } @@ -118,11 +122,13 @@ crossmp_vop_unlock(struct vop_unlock_args *ap) { struct vnode *vp; - struct lock *lk; int flags; +#if defined(WITNESS) || defined(KTR) + struct lock *lk; + lk = ap->a_vp->v_vnlock; +#endif vp = ap->a_vp; - lk = vp->v_vnlock; flags = ap->a_flags; if ((flags & LK_INTERLOCK) != 0) Index: sys/kern/vfs_subr.c =================================================================== --- sys/kern/vfs_subr.c +++ sys/kern/vfs_subr.c @@ -1405,7 +1405,6 @@ struct thread *td; struct lock_object *lo; static int cyclecount; - int error; CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); vp = NULL; @@ -1436,7 +1435,7 @@ else if (freevnodes > 0) vnlru_free_locked(1, NULL); else { - error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & + getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)); #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. 
*/ if (error != 0) { Index: sys/sys/capsicum.h =================================================================== --- sys/sys/capsicum.h +++ sys/sys/capsicum.h @@ -444,14 +444,14 @@ /* * Convert capability rights into VM access flags. */ -u_char cap_rights_to_vmprot(cap_rights_t *havep); +u_char cap_rights_to_vmprot(const cap_rights_t *havep); /* * For the purposes of procstat(1) and similar tools, allow kern_descrip.c to * extract the rights from a capability. */ -cap_rights_t *cap_rights_fde(struct filedescent *fde); -cap_rights_t *cap_rights(struct filedesc *fdp, int fd); +const cap_rights_t *cap_rights_fde(const struct filedescent *fde); +const cap_rights_t *cap_rights(struct filedesc *fdp, int fd); int cap_ioctl_check(struct filedesc *fdp, int fd, u_long cmd); int cap_fcntl_check_fde(struct filedescent *fde, int cmd);