Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c
+++ sys/kern/kern_clocksource.c
@@ -272,18 +272,22 @@
 #ifdef SMP
 	int cpu;
 #endif
+#ifdef KTR
 	int c;
+	c = -1;
+#endif
 
 	state = DPCPU_PTR(timerstate);
 	event = state->nextevent;
-	c = -1;
 #ifdef SMP
 	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
 		CPU_FOREACH(cpu) {
 			state = DPCPU_ID_PTR(cpu, timerstate);
 			if (event > state->nextevent) {
 				event = state->nextevent;
+#ifdef KTR
 				c = cpu;
+#endif
 			}
 		}
 	}
Index: sys/kern/kern_conf.c
===================================================================
--- sys/kern/kern_conf.c
+++ sys/kern/kern_conf.c
@@ -866,10 +866,15 @@
 {
 	struct cdev *dev;
 	va_list ap;
+#ifdef INVARIANTS
 	int res;
+#endif
 
 	va_start(ap, fmt);
-	res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt,
+#ifdef INVARIANTS
+	res =
+#endif
+	make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt,
 	    ap);
 	va_end(ap);
 	KASSERT(res == 0 && dev != NULL,
@@ -883,10 +888,15 @@
 {
 	struct cdev *dev;
 	va_list ap;
+#ifdef INVARIANTS
 	int res;
+#endif
 
 	va_start(ap, fmt);
-	res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap);
+#ifdef INVARIANTS
+	res =
+#endif
+	make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap);
 	va_end(ap);
 	KASSERT(res == 0 && dev != NULL,
@@ -996,10 +1006,14 @@
 {
 	struct cdev *dev;
 	va_list ap;
+#ifdef INVARIANTS
 	int res;
-
+#endif
 	va_start(ap, fmt);
-	res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap);
+#ifdef INVARIANTS
+	res =
+#endif
+	make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap);
 	va_end(ap);
 	KASSERT(res == 0 && dev != NULL,
Index: sys/kern/kern_cpuset.c
===================================================================
--- sys/kern/kern_cpuset.c
+++ sys/kern/kern_cpuset.c
@@ -1385,9 +1385,11 @@
 cpuset_thread0(void)
 {
 	struct cpuset *set;
-	int error;
 	int i;
-
+#ifdef INVARIANTS
+	int error;
+#endif
+
 	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_CACHE, 0);
 	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
@@ -1411,14 +1413,20 @@
 	 * Now derive a default (1), modifiable set from that to give out.
 	 */
 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
-	error = _cpuset_create(set, cpuset_zero, NULL, NULL, 1);
+#ifdef INVARIANTS
+	error =
+#endif
+	_cpuset_create(set, cpuset_zero, NULL, NULL, 1);
 	KASSERT(error == 0, ("Error creating default set: %d\n", error));
 	cpuset_default = set;
 	/*
 	 * Create the kernel set (2).
 	 */
 	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
-	error = _cpuset_create(set, cpuset_zero, NULL, NULL, 2);
+#ifdef INVARIANTS
+	error =
+#endif
+	_cpuset_create(set, cpuset_zero, NULL, NULL, 2);
 	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
 	set->cs_domain = &domainset2;
 	cpuset_kernel = set;
Index: sys/kern/kern_descrip.c
===================================================================
--- sys/kern/kern_descrip.c
+++ sys/kern/kern_descrip.c
@@ -2625,9 +2625,9 @@
     struct file **fpp, seq_t *seqp)
 {
 #ifdef CAPABILITIES
-	struct filedescent *fde;
+	const struct filedescent *fde;
 #endif
-	struct fdescenttbl *fdt;
+	const struct fdescenttbl *fdt;
 	struct file *fp;
 	u_int count;
 #ifdef CAPABILITIES
@@ -2673,7 +2673,7 @@
 		 * table before this fd was closed, so it possible that
 		 * there is a stale fp pointer in cached version.
 		 */
-		fdt = *(struct fdescenttbl * volatile *)&(fdp->fd_files);
+		fdt = *(const struct fdescenttbl * const volatile *)&(fdp->fd_files);
 		continue;
 	}
 	/*
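Note on the pattern in the hunks above (kern_clocksource.c, kern_conf.c, kern_cpuset.c), repeated throughout this patch: a variable consumed only by KASSERT() is dead weight when the kernel is built without INVARIANTS, so both the declaration and the assignment are compiled conditionally. A minimal userland sketch of the idea — the KASSERT stand-in and do_work() are illustrative, not kernel API:

	#include <stdio.h>

	#ifdef INVARIANTS
	#define	KASSERT(exp, msg)	do { if (!(exp)) printf msg; } while (0)
	#else
	#define	KASSERT(exp, msg)	do { } while (0)
	#endif

	static int
	do_work(void)
	{
		return (0);	/* stands in for make_dev_credv() etc. */
	}

	int
	main(void)
	{
	#ifdef INVARIANTS
		int error;

		error =
	#endif
		do_work();
		KASSERT(error == 0, ("do_work returned %d\n", error));
		return (0);
	}

Built with -DINVARIANTS the return value is captured and checked; without it, the call compiles to a plain statement, the variable never exists, and no -Wunused-but-set-variable fires.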
Index: sys/kern/kern_event.c
===================================================================
--- sys/kern/kern_event.c
+++ sys/kern/kern_event.c
@@ -751,12 +751,17 @@
 filt_timerdetach(struct knote *kn)
 {
 	struct kq_timer_cb_data *kc;
+#ifdef INVARIANTS
 	unsigned int old;
+#endif
 
 	kc = kn->kn_ptr.p_v;
 	callout_drain(&kc->c);
 	free(kc, M_KQUEUE);
-	old = atomic_fetchadd_int(&kq_ncallouts, -1);
+#ifdef INVARIANTS
+	old =
+#endif
+	atomic_fetchadd_int(&kq_ncallouts, -1);
 	KASSERT(old > 0, ("Number of callouts cannot become negative"));
 	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
 }
Index: sys/kern/kern_intr.c
===================================================================
--- sys/kern/kern_intr.c
+++ sys/kern/kern_intr.c
@@ -949,7 +949,6 @@
 	struct intr_thread *it;
 	struct thread *td;
 	struct thread *ctd;
-	struct proc *p;
 
 	/*
 	 * If no ithread or no handlers, then we have a stray interrupt.
@@ -961,7 +960,6 @@
 	ctd = curthread;
 	it = ie->ie_thread;
 	td = it->it_thread;
-	p = td->td_proc;
 
 	/*
 	 * If any of the handlers for this ithread claim to be good
@@ -973,7 +971,7 @@
 		random_harvest_queue(&entropy, sizeof(entropy), 2,
 		    RANDOM_INTERRUPT);
 	}
-	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
+	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
 
 	/*
 	 * Set it_need to tell the thread to keep running if it is already
@@ -1215,7 +1213,9 @@
 	struct intr_handler *ih = (struct intr_handler *)cookie;
 	struct intr_event *ie = ih->ih_event;
 	struct intr_entropy entropy;
+#ifdef INVARIANTS
 	int error;
+#endif
 
 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
 	    ih->ih_need);
@@ -1233,10 +1233,13 @@
 
 	if (!(flags & SWI_DELAY)) {
 		VM_CNT_INC(v_soft);
+#ifdef INVARIANTS
+		error =
+#endif
 #ifdef INTR_FILTER
-		error = intr_event_schedule_thread(ie, ie->ie_thread);
+		intr_event_schedule_thread(ie, ie->ie_thread);
 #else
-		error = intr_event_schedule_thread(ie);
+		intr_event_schedule_thread(ie);
 #endif
 		KASSERT(error == 0, ("stray software interrupt"));
 	}
@@ -1474,7 +1477,7 @@
 	struct intr_handler *ih;
 	struct trapframe *oldframe;
 	struct thread *td;
-	int error, ret, thread;
+	int ret, thread;
 
 	td = curthread;
 
@@ -1547,7 +1550,10 @@
 
 	/* Schedule the ithread if needed. */
 	if (thread) {
-		error = intr_event_schedule_thread(ie);
+#ifdef INVARIANTS
+		int error =
+#endif
+		intr_event_schedule_thread(ie);
 		KASSERT(error == 0, ("bad stray interrupt"));
 	}
 	critical_exit();
Index: sys/kern/kern_lock.c
===================================================================
--- sys/kern/kern_lock.c
+++ sys/kern/kern_lock.c
@@ -1101,7 +1101,6 @@
 {
 	struct lock_class *class;
 	uintptr_t x, tid;
-	bool unlocked;
 	const char *file;
 	int line;
 
@@ -1112,12 +1111,10 @@
 	line = __LINE__;
 
 	_lockmgr_assert(lk, KA_LOCKED, file, line);
-	unlocked = false;
 	x = lk->lk_lock;
 	if (__predict_true(x & LK_SHARE) != 0) {
 		if (lockmgr_sunlock_try(lk, &x)) {
 			lockmgr_note_shared_release(lk, file, line);
-			unlocked = true;
 		} else {
 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
 		}
@@ -1126,7 +1123,6 @@
 		if (!lockmgr_recursed(lk) &&
 		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
 			lockmgr_note_exclusive_release(lk, file, line);
-			unlocked = true;
 		} else {
 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
 		}
Index: sys/kern/kern_lockf.c
===================================================================
--- sys/kern/kern_lockf.c
+++ sys/kern/kern_lockf.c
@@ -2235,8 +2235,11 @@
 {
 	struct owner_edge *e;
 	struct owner_vertex_list deltaF, deltaB;
-	int nF, nB, n, vi, i;
+	int nF, n, vi, i;
 	int *indices;
+#ifdef LOCKF_DEBUG
+	int nB;
+#endif
 
 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
 
@@ -2300,9 +2303,9 @@
 		printf("deltaF = ");
 		graph_print_vertices(&deltaF);
 	}
+	nB =
 #endif
-
-	nB = graph_delta_backward(g, x, y, &deltaB);
+	graph_delta_backward(g, x, y, &deltaB);
 
 #ifdef LOCKF_DEBUG
 	if (lockf_debug & 8) {
Index: sys/kern/kern_physio.c
===================================================================
--- sys/kern/kern_physio.c
+++ sys/kern/kern_physio.c
@@ -47,10 +47,13 @@
 	struct vm_page **pages;
 	caddr_t sa;
 	u_int iolen, poff;
-	int error, i, npages, maxpages;
+	int error, i, maxpages;
 	vm_prot_t prot;
+	int npages;
 
 	csw = dev->si_devsw;
+	npages = 0;
+	sa = NULL;
 	/* check if character device is being destroyed */
 	if (csw == NULL)
 		return (ENXIO);
@@ -177,7 +180,7 @@
 				error = EFAULT;
 				goto doerror;
 			}
-			if (pbuf) {
+			if (pbuf && sa) {
 				pmap_qenter((vm_offset_t)sa, pages, npages);
 				bp->bio_data = sa + poff;
Index: sys/kern/kern_rwlock.c
===================================================================
--- sys/kern/kern_rwlock.c
+++ sys/kern/kern_rwlock.c
@@ -887,7 +887,7 @@
 #ifdef ADAPTIVE_RWLOCKS
 	int spintries = 0;
 	int i, n;
-	enum { READERS, WRITER } sleep_reason;
+	enum { INVALID, READERS, WRITER } sleep_reason = INVALID;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
Index: sys/kern/kern_sendfile.c
===================================================================
--- sys/kern/kern_sendfile.c
+++ sys/kern/kern_sendfile.c
@@ -341,8 +341,10 @@
 	}
 
 	for (int i = 0; i < npages;) {
-		int j, a, count, rv;
-
+		int j, a, count;
+#ifdef INVARIANTS
+		int rv;
+#endif
 		/* Skip valid pages. */
 		if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
 		    xfsize(i, npages, off, len))) {
@@ -401,7 +403,10 @@
 		}
 
 		refcount_acquire(&sfio->nios);
-		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
+#ifdef INVARIANTS
+		rv =
+#endif
+		vm_pager_get_pages_async(obj, pa + i, count, NULL,
 		    i + count == npages ? &rhpages : NULL, &sendfile_iodone, sfio);
 		KASSERT(rv == VM_PAGER_OK,
 		    ("%s: pager fail obj %p page %p",
@@ -688,6 +693,7 @@
 	if (space == 0) {
 		sfio = NULL;
 		nios = 0;
+		npages = 0;
 		goto prepend_header;
 	}
 	hdr_uio = NULL;
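The kern_rwlock.c hunk above (mirrored in kern_sx.c below) takes a different tack for a maybe-uninitialized warning: instead of hiding the variable behind #ifdef, it widens the enum with an INVALID sentinel and initializes to it, keeping the "not yet decided" state explicit. A self-contained sketch; classify() and its argument are illustrative, since the real code decides between READERS and WRITER deep inside the spin/sleep loop:

	enum sleep_reason { INVALID, READERS, WRITER };

	static enum sleep_reason
	classify(int owned_by_writer)
	{
		/*
		 * The compiler cannot always prove that every path assigns
		 * the enum before it is read; the sentinel gives it a
		 * well-defined initial value without pretending a real
		 * reason was chosen.
		 */
		enum sleep_reason sleep_reason = INVALID;

		if (owned_by_writer)
			sleep_reason = WRITER;
		else
			sleep_reason = READERS;
		return (sleep_reason);
	}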
Index: sys/kern/kern_shutdown.c
===================================================================
--- sys/kern/kern_shutdown.c
+++ sys/kern/kern_shutdown.c
@@ -1495,10 +1495,7 @@
 int
 dump_finish(struct dumperinfo *di, struct kerneldumpheader *kdh)
 {
-	uint64_t extent;
 	int error;
-
-	extent = dtoh64(kdh->dumpextent);
 
 	if (di->kdcomp != NULL) {
 		error = compressor_flush(di->kdcomp->kdc_stream);
Index: sys/kern/kern_sig.c
===================================================================
--- sys/kern/kern_sig.c
+++ sys/kern/kern_sig.c
@@ -605,11 +605,8 @@
 void
 signotify(struct thread *td)
 {
-	struct proc *p;
 
-	p = td->td_proc;
-
-	PROC_LOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 
 	if (SIGPENDING(td)) {
 		thread_lock(td);
Index: sys/kern/kern_sx.c
===================================================================
--- sys/kern/kern_sx.c
+++ sys/kern/kern_sx.c
@@ -551,7 +551,7 @@
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
 	u_int i, n, spintries = 0;
-	enum { READERS, WRITER } sleep_reason;
+	enum { INVALID, READERS, WRITER } sleep_reason = INVALID;
 	bool adaptive;
 #endif
 #ifdef LOCK_PROFILING
@@ -568,7 +568,7 @@
 	int64_t all_time = 0;
 #endif
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
-	uintptr_t state;
+	uintptr_t state = 0;
 #endif
 	int extra_work = 0;
 
@@ -944,7 +944,7 @@
 	int64_t all_time = 0;
 #endif
 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
-	uintptr_t state;
+	uintptr_t state = 0;
 #endif
 	int extra_work = 0;
 
Index: sys/kern/kern_umtx.c
===================================================================
--- sys/kern/kern_umtx.c
+++ sys/kern/kern_umtx.c
@@ -662,11 +662,9 @@
 static int
 umtxq_count(struct umtx_key *key)
 {
-	struct umtxq_chain *uc;
 	struct umtxq_queue *uh;
 
-	uc = umtxq_getchain(key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
 	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
 	if (uh != NULL)
 		return (uh->length);
@@ -680,12 +678,10 @@
 static int
 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
 {
-	struct umtxq_chain *uc;
 	struct umtxq_queue *uh;
 
 	*first = NULL;
-	uc = umtxq_getchain(key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
 	uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
 	if (uh != NULL) {
 		*first = TAILQ_FIRST(&uh->head);
@@ -727,14 +723,12 @@
 static int
 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
 {
-	struct umtxq_chain *uc;
 	struct umtxq_queue *uh;
 	struct umtx_q *uq;
 	int ret;
 
 	ret = 0;
-	uc = umtxq_getchain(key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
 	uh = umtxq_queue_lookup(key, q);
 	if (uh != NULL) {
 		while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
@@ -754,10 +748,8 @@
 static inline void
 umtxq_signal_thread(struct umtx_q *uq)
 {
-	struct umtxq_chain *uc;
 
-	uc = umtxq_getchain(&uq->uq_key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
 	umtxq_remove(uq);
 	wakeup(uq);
 }
@@ -1663,16 +1655,18 @@
 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
     const char *wmesg, struct abs_timeout *timo, bool shared)
 {
-	struct umtxq_chain *uc;
 	struct thread *td, *td1;
 	struct umtx_q *uq1;
 	int error, pri;
+#ifdef INVARIANTS
+	struct umtxq_chain *uc;
 
+	uc = umtxq_getchain(&uq->uq_key);
+#endif
 	error = 0;
 	td = uq->uq_thread;
 	KASSERT(td == curthread, ("inconsistent uq_thread"));
-	uc = umtxq_getchain(&uq->uq_key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
 	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
 	umtxq_insert(uq);
 	mtx_lock(&umtx_lock);
@@ -1728,10 +1722,8 @@
 static void
 umtx_pi_ref(struct umtx_pi *pi)
 {
-	struct umtxq_chain *uc;
 
-	uc = umtxq_getchain(&pi->pi_key);
-	UMTXQ_LOCKED_ASSERT(uc);
+	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
 	pi->pi_refcount++;
 }
Index: sys/kern/subr_capability.c
===================================================================
--- sys/kern/subr_capability.c
+++ sys/kern/subr_capability.c
@@ -49,6 +49,7 @@
 #include 
 #include 
 #include 
+#define INVARIANTS
 #endif
 
 #ifdef _KERNEL
@@ -108,12 +109,17 @@
 cap_rights_vset(cap_rights_t *rights, va_list ap)
 {
 	uint64_t right;
-	int i, n;
+	int i;
+#ifdef INVARIANTS
+	int n;
+#endif
 
 	assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00);
 
+#ifdef INVARIANTS
 	n = CAPARSIZE(rights);
 	assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX);
+#endif
 
 	for (;;) {
 		right = (uint64_t)va_arg(ap, unsigned long long);
@@ -133,13 +139,15 @@
 cap_rights_vclear(cap_rights_t *rights, va_list ap)
 {
 	uint64_t right;
-	int i, n;
+	int i;
+#ifdef INVARIANTS
+	int n;
 
 	assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00);
 
 	n = CAPARSIZE(rights);
 	assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX);
-
+#endif
 	for (;;) {
 		right = (uint64_t)va_arg(ap, unsigned long long);
 		if (right == 0)
@@ -158,13 +166,15 @@
 cap_rights_is_vset(const cap_rights_t *rights, va_list ap)
 {
 	uint64_t right;
-	int i, n;
+	int i;
+#ifdef INVARIANTS
+	int n;
 
 	assert(CAPVER(rights) == CAP_RIGHTS_VERSION_00);
-
 	n = CAPARSIZE(rights);
 	assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX);
-
+#endif
+
 	for (;;) {
 		right = (uint64_t)va_arg(ap, unsigned long long);
 		if (right == 0)
@@ -194,13 +204,11 @@
 cap_rights_t *
 __cap_rights_init(int version, cap_rights_t *rights, ...)
 {
-	unsigned int n;
 	va_list ap;
 
 	assert(version == CAP_RIGHTS_VERSION_00);
-	n = version + 2;
-	assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX);
+	assert(version >= CAPARSIZE_MIN - 2 && version <= CAPARSIZE_MAX - 2);
 
 	CAP_NONE(rights);
 	va_start(ap, rights);
 	cap_rights_vset(rights, ap);
@@ -339,7 +347,6 @@
 
 	n = CAPARSIZE(big);
 	assert(n >= CAPARSIZE_MIN && n <= CAPARSIZE_MAX);
-
 	for (i = 0; i < n; i++) {
 		if ((big->cr_rights[i] & little->cr_rights[i]) !=
 		    little->cr_rights[i]) {
Index: sys/kern/subr_epoch.c
===================================================================
--- sys/kern/subr_epoch.c
+++ sys/kern/subr_epoch.c
@@ -136,7 +136,7 @@
 static void
 epoch_init(void *arg __unused)
 {
-	int domain, count, cpu;
+	int domain, cpu;
 
 	block_count = counter_u64_alloc(M_WAITOK);
 	migrate_count = counter_u64_alloc(M_WAITOK);
@@ -146,7 +146,7 @@
 	epoch_call_task_count = counter_u64_alloc(M_WAITOK);
 	if (usedomains == false)
 		goto done;
-	count = domain = 0;
+	domain = 0;
 	domoffsets[0] = 0;
 	for (domain = 0; domain < vm_ndomains; domain++) {
 		domcount[domain] = CPU_COUNT(&cpuset_domain[domain]);
@@ -361,13 +361,11 @@
     void *arg __unused)
 {
 	epoch_record_t record;
-	struct epoch_pcpu_state *eps;
 	struct thread *td, *tdwait, *owner;
 	struct turnstile *ts;
 	struct lock_object *lock;
 	int spincount, gen;
 
-	eps = arg;
 	record = __containerof(cr, struct epoch_record, er_record);
 	td = curthread;
 	spincount = 0;
Index: sys/kern/subr_pidctrl.c
===================================================================
--- sys/kern/subr_pidctrl.c
+++ sys/kern/subr_pidctrl.c
@@ -103,9 +103,9 @@
 	pc->pc_derivative = error - pc->pc_olderror;
 
 	/* Divide by inverse gain values to produce output. */
-	output = ((pc->pc_error / pc->pc_Kpd) +
-	    (pc->pc_integral / pc->pc_Kid)) +
-	    (pc->pc_derivative / pc->pc_Kdd);
+	output = ((pc->pc_error / Kpd) +
+	    (pc->pc_integral / Kid)) +
+	    (pc->pc_derivative / Kdd);
 
 	/* Save for sysctl. */
 	pc->pc_output = output;
 	pc->pc_input = input;
@@ -146,9 +146,9 @@
 	pc->pc_derivative = error - pc->pc_olderror;
 
 	/* Divide by inverse gain values to produce output. */
-	output = ((error / pc->pc_Kpd) +
-	    (pc->pc_integral / pc->pc_Kid)) +
-	    (pc->pc_derivative / pc->pc_Kdd);
+	output = ((error / Kpd) +
+	    (pc->pc_integral / Kid)) +
+	    (pc->pc_derivative / Kdd);
 	output = MAX(output - pc->pc_output, 0);
 	pc->pc_output += output;
 	pc->pc_input = input;
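For reference, the arithmetic in the subr_pidctrl.c hunks: the gains are stored inverted (as divisors), so each term is a division. Kpd, Kid and Kdd are presumably locals caching pc->pc_Kpd, pc->pc_Kid and pc->pc_Kdd earlier in the same functions, outside the hunk context. A toy model of one controller step, with illustrative names:

	struct toy_pid {
		int Kpd, Kid, Kdd;	/* inverse P/I/D gains (divisors) */
		int integral, olderror;
	};

	static int
	toy_pid_step(struct toy_pid *pc, int error)
	{
		int derivative;

		pc->integral += error;			/* accumulate I term */
		derivative = error - pc->olderror;	/* discrete D term */
		pc->olderror = error;
		return (error / pc->Kpd + pc->integral / pc->Kid +
		    derivative / pc->Kdd);
	}

With Kpd = 2, Kid = 8, Kdd = 4 and a first error of 20, the output is 20/2 + 20/8 + 20/4 = 10 + 2 + 5 = 17; a larger divisor means a weaker term.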
Index: sys/kern/subr_prf.c
===================================================================
--- sys/kern/subr_prf.c
+++ sys/kern/subr_prf.c
@@ -660,6 +660,7 @@
 	int stop = 0, retval = 0;
 
 	num = 0;
+	q = NULL;
 	if (!func)
 		d = (char *) arg;
 	else
Index: sys/kern/subr_sleepqueue.c
===================================================================
--- sys/kern/subr_sleepqueue.c
+++ sys/kern/subr_sleepqueue.c
@@ -384,12 +384,14 @@
 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
     int flags)
 {
-	struct sleepqueue_chain *sc;
 	struct thread *td;
 	sbintime_t pr1;
+#ifdef INVARIANTS
+	struct sleepqueue_chain *sc;
 
-	td = curthread;
 	sc = SC_LOOKUP(wchan);
+#endif
+	td = curthread;
 	mtx_assert(&sc->sc_lock, MA_OWNED);
 	MPASS(TD_ON_SLEEPQ(td));
 	MPASS(td->td_sleepqueue == NULL);
@@ -780,6 +782,7 @@
 static int
 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 {
+#ifdef INVARIANTS
 	struct sleepqueue_chain *sc;
 
 	MPASS(td != NULL);
@@ -789,6 +792,7 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	sc = SC_LOOKUP(sq->sq_wchan);
 	mtx_assert(&sc->sc_lock, MA_OWNED);
+#endif
 
 	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
@@ -974,7 +978,6 @@
 static void
 sleepq_timeout(void *arg)
 {
-	struct sleepqueue_chain *sc;
 	struct sleepqueue *sq;
 	struct thread *td;
 	void *wchan;
@@ -996,9 +999,13 @@
 	 * See if the thread is asleep and get the wait
 	 * channel if it is.
 	 */
-	wchan = td->td_wchan;
-	sc = SC_LOOKUP(wchan);
+#ifdef INVARIANTS
+	struct sleepqueue_chain *sc;
+
+	sc = SC_LOOKUP(td->td_wchan);
 	THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
+#endif
+	wchan = td->td_wchan;
 	sq = sleepq_lookup(wchan);
 	MPASS(sq != NULL);
 	td->td_flags |= TDF_TIMEOUT;
Index: sys/kern/subr_turnstile.c
===================================================================
--- sys/kern/subr_turnstile.c
+++ sys/kern/subr_turnstile.c
@@ -804,7 +804,6 @@
 int
 turnstile_signal(struct turnstile *ts, int queue)
 {
-	struct turnstile_chain *tc;
 	struct thread *td;
 	int empty;
 
@@ -833,9 +832,13 @@
 	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
 	if (empty) {
+#if defined(INVARIANTS) || defined(TURNSTILE_PROFILING)
+		struct turnstile_chain *tc;
+
 		tc = TC_LOOKUP(ts->ts_lockobj);
 		mtx_assert(&tc->tc_lock, MA_OWNED);
 		MPASS(LIST_EMPTY(&ts->ts_free));
+#endif
 #ifdef TURNSTILE_PROFILING
 		tc->tc_depth--;
 #endif
@@ -855,21 +858,24 @@
 void
 turnstile_broadcast(struct turnstile *ts, int queue)
 {
-	struct turnstile_chain *tc;
 	struct turnstile *ts1;
 	struct thread *td;
+#ifdef INVARIANTS
+	struct turnstile_chain *tc;
 
-	MPASS(ts != NULL);
-	mtx_assert(&ts->ts_lock, MA_OWNED);
-	MPASS(curthread->td_proc->p_magic == P_MAGIC);
-	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
 	/*
 	 * We must have the chain locked so that we can remove the empty
 	 * turnstile from the hash queue.
 	 */
+	MPASS(ts != NULL);
 	tc = TC_LOOKUP(ts->ts_lockobj);
 	mtx_assert(&tc->tc_lock, MA_OWNED);
+
+	mtx_assert(&ts->ts_lock, MA_OWNED);
+	MPASS(curthread->td_proc->p_magic == P_MAGIC);
+	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
+#endif
 
 	/*
 	 * Transfer the blocked list to the pending list.
Index: sys/kern/subr_uio.c
===================================================================
--- sys/kern/subr_uio.c
+++ sys/kern/subr_uio.c
@@ -212,7 +212,7 @@
 	size_t cnt;
 	int error, newflags, save;
 
-	error = 0;
+	save = error = 0;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove: mode"));
@@ -275,7 +275,7 @@
 		n -= cnt;
 	}
 out:
-	if (uio->uio_segflg == UIO_USERSPACE)
+	if (save)
 		curthread_pflags_restore(save);
 	return (error);
 }
Index: sys/kern/sys_capability.c
===================================================================
--- sys/kern/sys_capability.c
+++ sys/kern/sys_capability.c
@@ -183,7 +183,7 @@
  * Convert capability rights into VM access flags.
  */
 u_char
-cap_rights_to_vmprot(cap_rights_t *havep)
+cap_rights_to_vmprot(const cap_rights_t *havep)
 {
 	u_char maxprot;
 
@@ -204,14 +204,14 @@
  * this one file.
 */
 
-cap_rights_t *
-cap_rights_fde(struct filedescent *fdep)
+const cap_rights_t *
+cap_rights_fde(const struct filedescent *fdep)
 {
 
 	return (&fdep->fde_rights);
 }
 
-cap_rights_t *
+const cap_rights_t *
 cap_rights(struct filedesc *fdp, int fd)
 {
Index: sys/kern/sys_process.c
===================================================================
--- sys/kern/sys_process.c
+++ sys/kern/sys_process.c
@@ -321,7 +321,6 @@
 	struct iovec iov;
 	struct uio uio;
 	ssize_t slen;
-	int error;
 
 	MPASS(len < SSIZE_MAX);
 	slen = (ssize_t)len;
@@ -335,7 +334,7 @@
 	uio.uio_segflg = UIO_SYSSPACE;
 	uio.uio_rw = rw;
 	uio.uio_td = td;
-	error = proc_rwmem(p, &uio);
+	proc_rwmem(p, &uio);
 	if (uio.uio_resid == slen)
 		return (-1);
 	return (slen - uio.uio_resid);
Index: sys/kern/sysv_msg.c
===================================================================
--- sys/kern/sysv_msg.c
+++ sys/kern/sysv_msg.c
@@ -776,7 +776,7 @@
 	struct prison *rpr;
 	short next;
 #ifdef RACCT
-	size_t saved_msgsz;
+	size_t saved_msgsz = 0;
 #endif
 
 	rpr = msg_find_prison(td->td_ucred);
Index: sys/kern/tty_inq.c
===================================================================
--- sys/kern/tty_inq.c
+++ sys/kern/tty_inq.c
@@ -328,13 +328,18 @@
 int
 ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes,
     int quote)
 {
+#ifdef INVARIANTS
 	size_t ret;
-
+#endif
+
 	if (ttyinq_bytesleft(ti) < nbytes)
 		return (-1);
 
 	/* We should always be able to write it back. */
-	ret = ttyinq_write(ti, buf, nbytes, quote);
+#ifdef INVARIANTS
+	ret =
+#endif
+	ttyinq_write(ti, buf, nbytes, quote);
 	MPASS(ret == nbytes);
 
 	return (0);
Index: sys/kern/tty_outq.c
===================================================================
--- sys/kern/tty_outq.c
+++ sys/kern/tty_outq.c
@@ -324,13 +324,18 @@
 int
 ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
 {
+#ifdef INVARIANTS
 	size_t ret;
+#endif
 
 	if (ttyoutq_bytesleft(to) < nbytes)
 		return (-1);
 
 	/* We should always be able to write it back. */
-	ret = ttyoutq_write(to, buf, nbytes);
+#ifdef INVARIANTS
+	ret =
+#endif
+	ttyoutq_write(to, buf, nbytes);
 	MPASS(ret == nbytes);
 
 	return (0);
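The tty_inq.c/tty_outq.c hunks above rely on the *_write_nofrag() contract: capacity is checked up front, so the write that follows must consume every byte, and MPASS() (an INVARIANTS-only assertion) documents that expectation. A userland sketch of the contract, with illustrative names rather than the kernel API:

	#include <assert.h>
	#include <string.h>

	#define	MPASS(exp)	assert(exp)

	struct toyq {
		char buf[16];
		size_t len;
	};

	static size_t
	toyq_bytesleft(const struct toyq *q)
	{
		return (sizeof(q->buf) - q->len);
	}

	static size_t
	toyq_write(struct toyq *q, const void *p, size_t n)
	{
		if (n > toyq_bytesleft(q))
			n = toyq_bytesleft(q);	/* plain write may fragment */
		memcpy(q->buf + q->len, p, n);
		q->len += n;
		return (n);
	}

	static int
	toyq_write_nofrag(struct toyq *q, const void *p, size_t n)
	{
		size_t ret;

		if (toyq_bytesleft(q) < n)
			return (-1);	/* all or nothing */
		ret = toyq_write(q, p, n);
		MPASS(ret == n);	/* cannot fall short after the check */
		return (0);
	}

Since MPASS() is the only consumer of ret, the patch moves ret itself inside #ifdef INVARIANTS in the kernel versions.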
Index: sys/kern/uipc_mqueue.c
===================================================================
--- sys/kern/uipc_mqueue.c
+++ sys/kern/uipc_mqueue.c
@@ -1343,14 +1343,12 @@
 	char buf[80];
 	struct vnode *vp = ap->a_vp;
 	struct uio *uio = ap->a_uio;
-	struct mqfs_node *pn;
 	struct mqueue *mq;
 	int len, error;
 
 	if (vp->v_type != VREG)
 		return (EINVAL);
-	pn = VTON(vp);
 	mq = VTOMQ(vp);
 	snprintf(buf, sizeof(buf),
 	    "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
@@ -2439,11 +2437,13 @@
 static void
 mqueue_fdclose(struct thread *td, int fd, struct file *fp)
 {
-	struct filedesc *fdp;
 	struct mqueue *mq;
+#ifdef INVARIANTS
+	struct filedesc *fdp;
 
 	fdp = td->td_proc->p_fd;
 	FILEDESC_LOCK_ASSERT(fdp);
+#endif
 
 	if (fp->f_ops == &mqueueops) {
 		mq = FPTOMQ(fp);
Index: sys/kern/uipc_sockbuf.c
===================================================================
--- sys/kern/uipc_sockbuf.c
+++ sys/kern/uipc_sockbuf.c
@@ -460,6 +460,7 @@
 	u_int *hiwat, *lowat;
 	int error;
 
+	sb = NULL;
 	SOCK_LOCK(so);
 	if (SOLISTENING(so)) {
 		switch (cmd) {
Index: sys/kern/uipc_usrreq.c
===================================================================
--- sys/kern/uipc_usrreq.c
+++ sys/kern/uipc_usrreq.c
@@ -765,6 +765,7 @@
 	KASSERT(unp != NULL, ("uipc_detach: unp == NULL"));
 
 	vp = NULL;
+	vplock = NULL;
 	local_unp_rights = 0;
 
 	UNP_LINK_WLOCK();
@@ -787,7 +788,8 @@
 	}
 
 	if (unp->unp_vnode != vp && unp->unp_vnode != NULL) {
-		mtx_unlock(vplock);
+		if (vplock)
+			mtx_unlock(vplock);
 		UNP_PCB_UNLOCK(unp);
 		if (unp2)
 			UNP_PCB_UNLOCK(unp2);
@@ -1676,7 +1678,9 @@
 unp_disconnect(struct unpcb *unp, struct unpcb *unp2)
 {
 	struct socket *so, *so2;
-	int rele, freed;
+#ifdef INVARIANTS
+	int freed;
+#endif
 
 	KASSERT(unp2 != NULL, ("unp_disconnect: unp2 == NULL"));
 
@@ -1688,7 +1692,6 @@
 	MPASS(unp->unp_conn == unp2);
 	unp->unp_conn = NULL;
-	rele = 0;
 	so = unp->unp_socket;
 	so2 = unp2->unp_socket;
 	switch (unp->unp_socket->so_type) {
@@ -1713,9 +1716,15 @@
 		soisdisconnected(so2);
 		break;
 	}
-	freed = unp_pcb_rele(unp);
+#ifdef INVARIANTS
+	freed =
+#endif
+	unp_pcb_rele(unp);
 	MPASS(freed == 0);
-	freed = unp_pcb_rele(unp2);
+#ifdef INVARIANTS
+	freed =
+#endif
+	unp_pcb_rele(unp2);
 	MPASS(freed == 0);
 }
Index: sys/kern/vfs_bio.c
===================================================================
--- sys/kern/vfs_bio.c
+++ sys/kern/vfs_bio.c
@@ -1851,10 +1851,8 @@
 static void
 bd_init(struct bufdomain *bd)
 {
-	int domain;
 	int i;
 
-	domain = bd - bdomain;
 	bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
 	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
 	bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
@@ -2843,16 +2841,19 @@
 	vm_ooffset_t foff;
 	vm_page_t m;
 	vm_object_t obj;
-	struct vnode *vp;
 	int i, iosize, resid;
 	bool bogus;
+#ifdef INVARIANTS
+	struct vnode *vp;
+
+	vp = bp->b_vp;
+#endif
 
 	obj = bp->b_bufobj->bo_object;
 	KASSERT(obj->paging_in_progress >= bp->b_npages,
 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
 	    obj->paging_in_progress, bp->b_npages));
-	vp = bp->b_vp;
 	KASSERT(vp->v_holdcnt > 0,
 	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
 	KASSERT(vp->v_object != NULL,
@@ -5014,14 +5015,18 @@
 void
 bufstrategy(struct bufobj *bo, struct buf *bp)
 {
-	int i = 0;
 	struct vnode *vp;
-
+#ifdef INVARIANTS
+	int i = 0;
+#endif
+
 	vp = bp->b_vp;
 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
-	i = VOP_STRATEGY(vp, bp);
+#ifdef INVARIANTS
+	i =
+#endif
+	VOP_STRATEGY(vp, bp);
 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
 }
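uipc_usrreq.c above (and vfs_cache.c below) use the other recurring idiom in this patch: initialize a maybe-unset lock pointer to NULL and guard the unlock, rather than hiding it behind #ifdef. A standalone sketch of the shape, using pthreads as a stand-in for the kernel mutex API; the names are illustrative:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void
	do_path(int need_lock)
	{
		pthread_mutex_t *lkp;

		/* NULL-init makes the "lock never taken" path well
		 * defined and silences -Wmaybe-uninitialized. */
		lkp = NULL;
		if (need_lock) {
			lkp = &lock;
			pthread_mutex_lock(lkp);
		}
		/* ... work that may or may not need the lock ... */
		if (lkp != NULL)
			pthread_mutex_unlock(lkp);
	}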
Index: sys/kern/vfs_cache.c
===================================================================
--- sys/kern/vfs_cache.c
+++ sys/kern/vfs_cache.c
@@ -752,6 +752,7 @@
 	int i;
 
 	*ncpp = ncp = NULL;
+	neglist = NULL;
 
 	for (i = start; i < numneglists; i++) {
 		neglist = &neglists[i];
@@ -1230,7 +1231,7 @@
 	struct namecache_ts *ncp_ts;
 	struct namecache *ncp;
 	struct rwlock *blp;
-	struct mtx *dvlp, *dvlp2;
+	struct mtx *dvlp;
 	uint32_t hash;
 	int error, ltype;
 
@@ -1249,12 +1250,12 @@
 
 retry:
 	blp = NULL;
+	dvlp = NULL;
 	error = 0;
 	if (cnp->cn_namelen == 2 &&
 	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
 		counter_u64_add(dotdothits, 1);
 		dvlp = VP2VNODELOCK(dvp);
-		dvlp2 = NULL;
 		mtx_lock(dvlp);
 		ncp = dvp->v_cache_dd;
 		if (ncp == NULL) {
@@ -1347,7 +1348,8 @@
 		VOP_UNLOCK(dvp, 0);
 	}
 	vhold(*vpp);
-	cache_lookup_unlock(blp, dvlp);
+	if (blp != NULL || dvlp != NULL)
+		cache_lookup_unlock(blp, dvlp);
 	error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread);
 	if (cnp->cn_flags & ISDOTDOT) {
 		vn_lock(dvp, ltype | LK_RETRY);
@@ -1629,6 +1631,7 @@
 	cache_celockstate_init(&cel);
 	ndd = NULL;
+	ncp_ts = NULL;
 	flag = 0;
 	if (cnp->cn_nameptr[0] == '.') {
 		if (cnp->cn_namelen == 1) {
Index: sys/kern/vfs_lookup.c
===================================================================
--- sys/kern/vfs_lookup.c
+++ sys/kern/vfs_lookup.c
@@ -92,16 +92,20 @@
 crossmp_vop_lock1(struct vop_lock1_args *ap)
 {
 	struct vnode *vp;
+	int flags;
+#if defined(WITNESS) || defined(KTR)
 	struct lock *lk;
 	const char *file;
-	int flags, line;
+	int line;
 
-	vp = ap->a_vp;
-	lk = vp->v_vnlock;
-	flags = ap->a_flags;
+	lk = ap->a_vp->v_vnlock;
 	file = ap->a_file;
 	line = ap->a_line;
+#endif
+	vp = ap->a_vp;
+	flags = ap->a_flags;
+
 	if ((flags & LK_SHARED) == 0)
 		panic("invalid lock request for crossmp");
 
@@ -110,7 +114,7 @@
 	WITNESS_LOCK(&lk->lock_object, 0, file, line);
 	if ((flags & LK_INTERLOCK) != 0)
 		VI_UNLOCK(vp);
-	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, ap->a_file, line);
+	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
 	return (0);
 }
@@ -118,11 +122,13 @@
 static int
 crossmp_vop_unlock(struct vop_unlock_args *ap)
 {
 	struct vnode *vp;
-	struct lock *lk;
 	int flags;
+#if defined(WITNESS) || defined(KTR)
+	struct lock *lk;
 
+	lk = ap->a_vp->v_vnlock;
+#endif
 	vp = ap->a_vp;
-	lk = vp->v_vnlock;
 	flags = ap->a_flags;
 
 	if ((flags & LK_INTERLOCK) != 0)
Index: sys/kern/vfs_subr.c
===================================================================
--- sys/kern/vfs_subr.c
+++ sys/kern/vfs_subr.c
@@ -1405,7 +1405,6 @@
 	struct thread *td;
 	struct lock_object *lo;
 	static int cyclecount;
-	int error;
 
 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
 	vp = NULL;
@@ -1436,7 +1435,7 @@
 	else if (freevnodes > 0)
 		vnlru_free_locked(1, NULL);
 	else {
-		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
+		getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
 		    MNTK_SUSPEND));
 #if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
 		if (error != 0) {