Index: head/sys/dev/hyperv/netvsc/if_hn.c
===================================================================
--- head/sys/dev/hyperv/netvsc/if_hn.c
+++ head/sys/dev/hyperv/netvsc/if_hn.c
@@ -169,6 +169,8 @@
 #define HN_PKTSIZE(m, align)		\
 	roundup2((m)->m_pkthdr.len + HN_RNDIS_PKT_LEN, (align))
 
+#define HN_RING_IDX2CPU(sc, idx)	(((sc)->hn_cpu + (idx)) % mp_ncpus)
+
 struct hn_txdesc {
 #ifndef HN_USE_TXDESC_BUFRING
 	SLIST_ENTRY(hn_txdesc) link;
@@ -411,15 +413,19 @@
 #endif
 #endif
 
-/* Use shared TX taskqueue */
-static int hn_share_tx_taskq = 0;
-SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
-    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");
-
 static int hn_tx_taskq_cnt = 1;
 SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_cnt, CTLFLAG_RDTUN,
     &hn_tx_taskq_cnt, 0, "# of TX taskqueues");
 
+#define HN_TX_TASKQ_M_INDEP	0
+#define HN_TX_TASKQ_M_GLOBAL	1
+#define HN_TX_TASKQ_M_EVTTQ	2
+
+static int hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP;
+SYSCTL_INT(_hw_hn, OID_AUTO, tx_taskq_mode, CTLFLAG_RDTUN,
+    &hn_tx_taskq_mode, 0, "TX taskqueue modes: "
+    "0 - independent, 1 - share global tx taskqs, 2 - share event taskqs");
+
 #ifndef HN_USE_TXDESC_BUFRING
 static int hn_use_txdesc_bufring = 0;
 #else
@@ -902,7 +908,7 @@
 	/*
 	 * Setup taskqueue for transmission.
 	 */
-	if (hn_tx_taskque == NULL) {
+	if (hn_tx_taskq_mode == HN_TX_TASKQ_M_INDEP) {
 		int i;
 
 		sc->hn_tx_taskqs =
@@ -915,7 +921,7 @@
 			taskqueue_start_threads(&sc->hn_tx_taskqs[i], 1, PI_NET,
 			    "%s tx%d", device_get_nameunit(dev), i);
 		}
-	} else {
+	} else if (hn_tx_taskq_mode == HN_TX_TASKQ_M_GLOBAL) {
 		sc->hn_tx_taskqs = hn_tx_taskque;
 	}
 
@@ -1216,7 +1222,7 @@
 	hn_destroy_rx_data(sc);
 	hn_destroy_tx_data(sc);
 
-	if (sc->hn_tx_taskqs != hn_tx_taskque) {
+	if (sc->hn_tx_taskqs != NULL && sc->hn_tx_taskqs != hn_tx_taskque) {
 		int i;
 
 		for (i = 0; i < hn_tx_taskq_cnt; ++i)
@@ -3312,7 +3318,12 @@
 	    M_WAITOK, &txr->hn_tx_lock);
 #endif
 
-	txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt];
+	if (hn_tx_taskq_mode == HN_TX_TASKQ_M_EVTTQ) {
+		txr->hn_tx_taskq = VMBUS_GET_EVENT_TASKQ(
+		    device_get_parent(dev), dev, HN_RING_IDX2CPU(sc, id));
+	} else {
+		txr->hn_tx_taskq = sc->hn_tx_taskqs[id % hn_tx_taskq_cnt];
+	}
 
 #ifdef HN_IFSTART_SUPPORT
 	if (hn_use_if_start) {
@@ -4205,7 +4216,7 @@
 	}
 
 	/* Bind this channel to a proper CPU. */
-	vmbus_chan_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
+	vmbus_chan_cpu_set(chan, HN_RING_IDX2CPU(sc, idx));
 
 	/*
 	 * Open this channel
@@ -5361,10 +5372,23 @@
 	else if (hn_tx_taskq_cnt > mp_ncpus)
 		hn_tx_taskq_cnt = mp_ncpus;
 
+	/*
+	 * Fix the TX taskqueue mode.
+	 */
+	switch (hn_tx_taskq_mode) {
+	case HN_TX_TASKQ_M_INDEP:
+	case HN_TX_TASKQ_M_GLOBAL:
+	case HN_TX_TASKQ_M_EVTTQ:
+		break;
+	default:
+		hn_tx_taskq_mode = HN_TX_TASKQ_M_INDEP;
+		break;
+	}
+
 	if (vm_guest != VM_GUEST_HV)
 		return;
 
-	if (!hn_share_tx_taskq)
+	if (hn_tx_taskq_mode != HN_TX_TASKQ_M_GLOBAL)
 		return;
 
 	hn_tx_taskque = malloc(hn_tx_taskq_cnt * sizeof(struct taskqueue *),
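
Usage note: both tunables above are declared CTLFLAG_RDTUN, so they can only be
set before the driver attaches, e.g. from /boot/loader.conf. A minimal,
illustrative sketch of selecting the new shared-event-taskqueue mode; the
values come from the HN_TX_TASKQ_M_* definitions in this diff:

	# /boot/loader.conf (illustrative)
	# 0 - independent, 1 - share global tx taskqs, 2 - share event taskqs;
	# any other value is reset to 0 by the fixup switch above.
	hw.hn.tx_taskq_mode="2"
	# Number of TX taskqueues; capped at mp_ncpus during boot.
	hw.hn.tx_taskq_cnt="1"

In mode 2 the driver allocates no TX taskqueues of its own: each
txr->hn_tx_taskq is borrowed from vmbus via VMBUS_GET_EVENT_TASKQ for the CPU
given by HN_RING_IDX2CPU(), which is why the detach path now checks
sc->hn_tx_taskqs against NULL before freeing.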