Index: sys/kern/kern_intr.c
===================================================================
--- sys/kern/kern_intr.c
+++ sys/kern/kern_intr.c
@@ -1208,6 +1208,7 @@
 	struct thread *td;
 	struct proc *p;
 	int wake, epoch_count;
+	bool needs_epoch;
 
 	td = curthread;
 	p = td->td_proc;
@@ -1242,20 +1243,22 @@
 	 * that the load of ih_need in ithread_execute_handlers()
 	 * is ordered after the load of it_need here.
 	 */
-	if (ie->ie_hflags & IH_NET) {
+	needs_epoch =
+	    (atomic_load_int(&ie->ie_hflags) & IH_NET) ? true : false;
+	if (needs_epoch) {
 		epoch_count = 0;
 		NET_EPOCH_ENTER(et);
 	}
 	while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
 		ithread_execute_handlers(p, ie);
-		if ((ie->ie_hflags & IH_NET) &&
+		if (needs_epoch &&
 		    ++epoch_count >= intr_epoch_batch) {
 			NET_EPOCH_EXIT(et);
 			epoch_count = 0;
 			NET_EPOCH_ENTER(et);
 		}
 	}
-	if (ie->ie_hflags & IH_NET)
+	if (needs_epoch)
 		NET_EPOCH_EXIT(et);
 	WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 	mtx_assert(&Giant, MA_NOTOWNED);
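
The control flow the patch settles on is: load ie_hflags once, cache the IH_NET test in a local bool, and use that bool both to decide whether to enter the network epoch and to batch epoch exit/re-entry inside the handler loop. Below is a minimal userspace sketch of that pattern only, not the kernel code itself; epoch_enter(), epoch_exit(), and handle_one() are hypothetical stand-ins for NET_EPOCH_ENTER(), NET_EPOCH_EXIT(), and ithread_execute_handlers().

/*
 * Sketch of the "test once, batch epoch sections" pattern.  The flag is
 * captured in a local bool before the loop so the enter/exit decisions
 * cannot diverge if the underlying flags word changes mid-loop.
 */
#include <stdbool.h>
#include <stdio.h>

static void epoch_enter(void) { puts("epoch enter"); }   /* hypothetical */
static void epoch_exit(void)  { puts("epoch exit"); }    /* hypothetical */
static void handle_one(int i) { printf("handler pass %d\n", i); }

static void
run_batched(bool needs_epoch, int passes, int batch)
{
	int epoch_count = 0;

	if (needs_epoch)
		epoch_enter();
	for (int i = 0; i < passes; i++) {
		handle_one(i);
		/* Exit and re-enter periodically so one epoch section stays short. */
		if (needs_epoch && ++epoch_count >= batch) {
			epoch_exit();
			epoch_count = 0;
			epoch_enter();
		}
	}
	if (needs_epoch)
		epoch_exit();
}

int
main(void)
{
	run_batched(true, 5, 2);
	return (0);
}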