Index: head/sys/net/iflib.c
===================================================================
--- head/sys/net/iflib.c
+++ head/sys/net/iflib.c
@@ -3515,8 +3515,7 @@
 	}
 	if (txq->ift_db_pending)
 		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
-	else
-		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
+	ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
 	if (ctx->ifc_flags & IFC_LEGACY)
		IFDI_INTR_ENABLE(ctx);
 	else {
@@ -3718,16 +3717,14 @@
 	DBG_COUNTER_INC(tx_seen);
 	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);
 
+	GROUPTASK_ENQUEUE(&txq->ift_task);
 	if (err) {
-		GROUPTASK_ENQUEUE(&txq->ift_task);
 		/* support forthcoming later */
 #ifdef DRIVER_BACKPRESSURE
 		txq->ift_closed = TRUE;
 #endif
 		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
 		m_freem(m);
-	} else if (TXQ_AVAIL(txq) < (txq->ift_size >> 1)) {
-		GROUPTASK_ENQUEUE(&txq->ift_task);
 	}
 
 	return (err);

Index: head/sys/net/mp_ring.c
===================================================================
--- head/sys/net/mp_ring.c
+++ head/sys/net/mp_ring.c
@@ -454,18 +454,12 @@
 	do {
 		os.state = ns.state = r->state;
 		ns.pidx_tail = pidx_stop;
-		ns.flags = BUSY;
+		if (os.flags == IDLE)
+			ns.flags = ABDICATED;
 	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
 	critical_exit();
 	counter_u64_add(r->enqueues, n);
 
-	/*
-	 * Turn into a consumer if some other thread isn't active as a consumer
-	 * already.
-	 */
-	if (os.flags != BUSY)
-		drain_ring_lockless(r, ns, os.flags, budget);
-
 	return (0);
 }
 #endif
@@ -476,7 +470,9 @@
 	union ring_state os, ns;
 
 	os.state = r->state;
-	if (os.flags != STALLED || os.pidx_head != os.pidx_tail || r->can_drain(r) == 0)
+	if ((os.flags != STALLED && os.flags != ABDICATED) ||	// Only continue in STALLED and ABDICATED
+	    os.pidx_head != os.pidx_tail ||			// Require work to be available
+	    (os.flags != ABDICATED && r->can_drain(r) == 0))	// Can either drain, or everyone left
 		return;
 
 	MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
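
For context, a minimal standalone C11 sketch of the handoff this change introduces. The state names and union layout mirror mp_ring.c, but everything else is illustrative: standard C atomics stand in for the kernel's atomic_cmpset_rel_64(), and commit_enqueue(), should_drain(), and main() are hypothetical helpers, not code from this patch. The point being modeled: a producer finishing an enqueue on an IDLE ring no longer drains in line; it only flips the state to ABDICATED, and the deferred tx task (which iflib_if_transmit now always schedules via GROUPTASK_ENQUEUE) performs the drain when ifmp_ring_check_drainage() sees STALLED or ABDICATED.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Consumer states, mirroring the flags in mp_ring.c. */
enum { IDLE, BUSY, STALLED, ABDICATED };

/*
 * One 64-bit state word holding both producer indexes, the consumer
 * index, and the flags, so a single CAS updates all of them atomically.
 */
union ring_state {
	struct {
		uint16_t pidx_head;	/* next slot a producer reserves */
		uint16_t pidx_tail;	/* last slot fully published */
		uint16_t cidx;		/* consumer index */
		uint16_t flags;		/* IDLE/BUSY/STALLED/ABDICATED */
	};
	uint64_t state;
};

/*
 * Commit an enqueue up to pidx_stop.  Unlike the old code, the producer
 * never drains in line: if the ring was IDLE it marks itself ABDICATED
 * so the deferred task knows it must become the consumer.
 */
static void
commit_enqueue(_Atomic uint64_t *ring, uint16_t pidx_stop)
{
	union ring_state os, ns;

	do {
		os.state = ns.state = atomic_load(ring);
		ns.pidx_tail = pidx_stop;	/* publish the new items */
		if (os.flags == IDLE)
			ns.flags = ABDICATED;	/* hand off to the task */
	} while (!atomic_compare_exchange_weak(ring, &os.state, ns.state));
}

/* Gate modeled on the patched ifmp_ring_check_drainage() condition. */
static bool
should_drain(union ring_state os, bool can_drain)
{
	if ((os.flags != STALLED && os.flags != ABDICATED) ||
	    os.pidx_head != os.pidx_tail ||
	    (os.flags != ABDICATED && !can_drain))
		return (false);
	return (true);
}

int
main(void)
{
	_Atomic uint64_t ring = 0;	/* all indexes 0, flags == IDLE */
	union ring_state s;

	/* A real producer would advance pidx_head first to reserve slots. */
	commit_enqueue(&ring, 0);	/* IDLE ring -> now ABDICATED */
	s.state = atomic_load(&ring);
	printf("flags=%u drain=%d\n", s.flags, should_drain(s, false));
	return (0);
}

Note that an ABDICATED ring passes the gate even when can_drain is false ("everyone left"), whereas a STALLED ring still has to show drainable work. Keeping the drain out of the enqueue path bounds the work charged to a transmitting thread and leaves a single, predictable consumer context: the tx group task.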