diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -140,6 +140,7 @@
 static void	vtnet_rx_vq_intr(void *);
 static void	vtnet_rxq_tq_intr(void *, int);
 
+static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int	vtnet_txq_below_threshold(struct vtnet_txq *);
 static int	vtnet_txq_notify(struct vtnet_txq *);
 static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
@@ -219,7 +220,6 @@
 static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
 static void	vtnet_vlan_tag_remove(struct mbuf *);
 static void	vtnet_set_rx_process_limit(struct vtnet_softc *);
-static void	vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
 		    struct sysctl_oid_list *, struct vtnet_rxq *);
@@ -1090,7 +1090,6 @@
 	}
 
 	vtnet_set_rx_process_limit(sc);
-	vtnet_set_tx_intr_threshold(sc);
 
 	DEBUGNET_SET(ifp, vtnet);
 
@@ -2024,15 +2023,42 @@
 }
 
 static int
-vtnet_txq_below_threshold(struct vtnet_txq *txq)
+vtnet_txq_intr_threshold(struct vtnet_txq *txq)
 {
 	struct vtnet_softc *sc;
-	struct virtqueue *vq;
+	int threshold;
 
 	sc = txq->vtntx_sc;
+
+	/*
+	 * The Tx interrupt is disabled until the queue free count falls
+	 * below our threshold. Completed frames are drained from the Tx
+	 * virtqueue before transmitting new frames and in the watchdog
+	 * callout, so the frequency of Tx interrupts is greatly reduced,
+	 * at the cost of not freeing mbufs as quickly as they otherwise
+	 * would be.
+	 */
+	threshold = virtqueue_size(txq->vtntx_vq) / 4;
+
+	/*
+	 * Without indirect descriptors, leave enough room for the most
+	 * segments we handle.
+	 */
+	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
+	    threshold < sc->vtnet_tx_nsegs)
+		threshold = sc->vtnet_tx_nsegs;
+
+	return (threshold);
+}
+
+static int
+vtnet_txq_below_threshold(struct vtnet_txq *txq)
+{
+	struct virtqueue *vq;
+
 	vq = txq->vtntx_vq;
 
-	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
+	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
 }
 
 static int
@@ -3058,6 +3084,7 @@
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		txq = &sc->vtnet_txqs[i];
 		txq->vtntx_watchdog = 0;
+		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
 #ifdef DEV_NETMAP
 		netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
 #endif /* DEV_NETMAP */
@@ -3749,36 +3776,6 @@
 	sc->vtnet_rx_process_limit = limit;
 }
 
-static void
-vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
-{
-	int size, thresh;
-
-	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
-
-	/*
-	 * The Tx interrupt is disabled until the queue free count falls
-	 * below our threshold. Completed frames are drained from the Tx
-	 * virtqueue before transmitting new frames and in the watchdog
-	 * callout, so the frequency of Tx interrupts is greatly reduced,
-	 * at the cost of not freeing mbufs as quickly as they otherwise
-	 * would be.
-	 *
-	 * N.B. We assume all the Tx queues are the same size.
-	 */
-	thresh = size / 4;
-
-	/*
-	 * Without indirect descriptors, leave enough room for the most
-	 * segments we handle.
-	 */
-	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
-	    thresh < sc->vtnet_tx_nsegs)
-		thresh = sc->vtnet_tx_nsegs;
-
-	sc->vtnet_tx_intr_thresh = thresh;
-}
-
 static void
 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
     struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -112,6 +112,7 @@
 #endif
 	int			 vtntx_id;
 	int			 vtntx_watchdog;
+	int			 vtntx_intr_threshold;
 	struct vtnet_txq_stats	 vtntx_stats;
 	struct taskqueue	*vtntx_tq;
 	struct task		 vtntx_intrtask;
@@ -161,7 +162,6 @@
 	int			 vtnet_rx_nsegs;
 	int			 vtnet_rx_nmbufs;
 	int			 vtnet_rx_clustersz;
-	int			 vtnet_tx_intr_thresh;
 	int			 vtnet_tx_nsegs;
 	int			 vtnet_if_flags;
 	int			 vtnet_max_mtu;
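
A note on how the per-queue value is consumed (not part of this patch):
with a 256-entry Tx virtqueue the threshold works out to 256 / 4 = 64,
so the Tx interrupt stays disabled while more than 64 descriptors are
free. A minimal sketch of that gating pattern follows. The name
vtnet_txq_enable_intr_sketch() is illustrative; virtqueue_postpone_intr()
and VQ_POSTPONE_LONG come from the existing virtqueue API in
sys/dev/virtio/virtqueue.h, but the body is an assumption-based sketch,
not code from this change.

/*
 * Sketch only: how a driver path that wants Tx interrupts back on
 * could use vtnet_txq_below_threshold() with the per-queue value
 * introduced by this patch.
 */
static int
vtnet_txq_enable_intr_sketch(struct vtnet_txq *txq)
{
	struct virtqueue *vq = txq->vtntx_vq;

	if (vtnet_txq_below_threshold(txq)) {
		/*
		 * Few free slots remain: request an interrupt, pushed
		 * out as far as the host allows, so completed frames
		 * are reclaimed before the ring fills.
		 */
		return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
	}

	/*
	 * More than vtntx_intr_threshold descriptors are still free:
	 * leave the interrupt disabled and rely on the next transmit
	 * or the watchdog callout to drain completions.
	 */
	return (0);
}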