Changeset View
Changeset View
Standalone View
Standalone View
head/sys/dev/hyperv/netvsc/if_hn.c
Show First 20 Lines • Show All 153 Lines • ▼ Show 20 Lines | |||||
#define HN_CSUM_IP_MASK (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP) | #define HN_CSUM_IP_MASK (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP) | ||||
#define HN_CSUM_IP6_MASK (CSUM_IP6_TCP | CSUM_IP6_UDP) | #define HN_CSUM_IP6_MASK (CSUM_IP6_TCP | CSUM_IP6_UDP) | ||||
#define HN_CSUM_IP_HWASSIST(sc) \ | #define HN_CSUM_IP_HWASSIST(sc) \ | ||||
((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP_MASK) | ((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP_MASK) | ||||
#define HN_CSUM_IP6_HWASSIST(sc) \ | #define HN_CSUM_IP6_HWASSIST(sc) \ | ||||
((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP6_MASK) | ((sc)->hn_tx_ring[0].hn_csum_assist & HN_CSUM_IP6_MASK) | ||||
#define HN_PKTSIZE_MIN(align) \ | |||||
roundup2(ETHER_MIN_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN + \ | |||||
HN_RNDIS_PKT_LEN, (align)) | |||||
#define HN_PKTSIZE(m, align) \ | |||||
roundup2((m)->m_pkthdr.len + HN_RNDIS_PKT_LEN, (align)) | |||||
struct hn_txdesc { | struct hn_txdesc { | ||||
#ifndef HN_USE_TXDESC_BUFRING | #ifndef HN_USE_TXDESC_BUFRING | ||||
SLIST_ENTRY(hn_txdesc) link; | SLIST_ENTRY(hn_txdesc) link; | ||||
#endif | #endif | ||||
STAILQ_ENTRY(hn_txdesc) agg_link; | |||||
/* Aggregated txdescs, in sending order. */ | |||||
STAILQ_HEAD(, hn_txdesc) agg_list; | |||||
/* The oldest packet, if transmission aggregation happens. */ | |||||
struct mbuf *m; | struct mbuf *m; | ||||
struct hn_tx_ring *txr; | struct hn_tx_ring *txr; | ||||
int refs; | int refs; | ||||
uint32_t flags; /* HN_TXD_FLAG_ */ | uint32_t flags; /* HN_TXD_FLAG_ */ | ||||
struct hn_nvs_sendctx send_ctx; | struct hn_nvs_sendctx send_ctx; | ||||
uint32_t chim_index; | uint32_t chim_index; | ||||
int chim_size; | int chim_size; | ||||
bus_dmamap_t data_dmap; | bus_dmamap_t data_dmap; | ||||
bus_addr_t rndis_pkt_paddr; | bus_addr_t rndis_pkt_paddr; | ||||
struct rndis_packet_msg *rndis_pkt; | struct rndis_packet_msg *rndis_pkt; | ||||
bus_dmamap_t rndis_pkt_dmap; | bus_dmamap_t rndis_pkt_dmap; | ||||
}; | }; | ||||
#define HN_TXD_FLAG_ONLIST 0x0001 | #define HN_TXD_FLAG_ONLIST 0x0001 | ||||
#define HN_TXD_FLAG_DMAMAP 0x0002 | #define HN_TXD_FLAG_DMAMAP 0x0002 | ||||
#define HN_TXD_FLAG_ONAGG 0x0004 | |||||
struct hn_rxinfo { | struct hn_rxinfo { | ||||
uint32_t vlan_info; | uint32_t vlan_info; | ||||
uint32_t csum_info; | uint32_t csum_info; | ||||
uint32_t hash_info; | uint32_t hash_info; | ||||
uint32_t hash_value; | uint32_t hash_value; | ||||
}; | }; | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | |||||
static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_caps_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_caps_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_hwassist_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_hwassist_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_rxfilter_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_rxfilter_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_rss_key_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_rss_key_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_rss_ind_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_rss_ind_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_rss_hash_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_rss_hash_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_txagg_size_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static int hn_txagg_pkts_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static int hn_txagg_pktmax_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static int hn_txagg_align_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static void hn_stop(struct hn_softc *); | static void hn_stop(struct hn_softc *); | ||||
static void hn_init_locked(struct hn_softc *); | static void hn_init_locked(struct hn_softc *); | ||||
static int hn_chan_attach(struct hn_softc *, | static int hn_chan_attach(struct hn_softc *, | ||||
struct vmbus_channel *); | struct vmbus_channel *); | ||||
static void hn_chan_detach(struct hn_softc *, | static void hn_chan_detach(struct hn_softc *, | ||||
struct vmbus_channel *); | struct vmbus_channel *); | ||||
static int hn_attach_subchans(struct hn_softc *); | static int hn_attach_subchans(struct hn_softc *); | ||||
Show All 31 Lines | static int hn_rxpkt(struct hn_rx_ring *, const void *, | ||||
int, const struct hn_rxinfo *); | int, const struct hn_rxinfo *); | ||||
static int hn_tx_ring_create(struct hn_softc *, int); | static int hn_tx_ring_create(struct hn_softc *, int); | ||||
static void hn_tx_ring_destroy(struct hn_tx_ring *); | static void hn_tx_ring_destroy(struct hn_tx_ring *); | ||||
static int hn_create_tx_data(struct hn_softc *, int); | static int hn_create_tx_data(struct hn_softc *, int); | ||||
static void hn_fixup_tx_data(struct hn_softc *); | static void hn_fixup_tx_data(struct hn_softc *); | ||||
static void hn_destroy_tx_data(struct hn_softc *); | static void hn_destroy_tx_data(struct hn_softc *); | ||||
static void hn_txdesc_dmamap_destroy(struct hn_txdesc *); | static void hn_txdesc_dmamap_destroy(struct hn_txdesc *); | ||||
static int hn_encap(struct hn_tx_ring *, | static int hn_encap(struct ifnet *, struct hn_tx_ring *, | ||||
struct hn_txdesc *, struct mbuf **); | struct hn_txdesc *, struct mbuf **); | ||||
static int hn_txpkt(struct ifnet *, struct hn_tx_ring *, | static int hn_txpkt(struct ifnet *, struct hn_tx_ring *, | ||||
struct hn_txdesc *); | struct hn_txdesc *); | ||||
static void hn_set_chim_size(struct hn_softc *, int); | static void hn_set_chim_size(struct hn_softc *, int); | ||||
static void hn_set_tso_maxsize(struct hn_softc *, int, int); | static void hn_set_tso_maxsize(struct hn_softc *, int, int); | ||||
static bool hn_tx_ring_pending(struct hn_tx_ring *); | static bool hn_tx_ring_pending(struct hn_tx_ring *); | ||||
static void hn_tx_ring_qflush(struct hn_tx_ring *); | static void hn_tx_ring_qflush(struct hn_tx_ring *); | ||||
static void hn_resume_tx(struct hn_softc *, int); | static void hn_resume_tx(struct hn_softc *, int); | ||||
static void hn_set_txagg(struct hn_softc *); | |||||
static void *hn_try_txagg(struct ifnet *, | |||||
struct hn_tx_ring *, struct hn_txdesc *, | |||||
int); | |||||
static int hn_get_txswq_depth(const struct hn_tx_ring *); | static int hn_get_txswq_depth(const struct hn_tx_ring *); | ||||
static void hn_txpkt_done(struct hn_nvs_sendctx *, | static void hn_txpkt_done(struct hn_nvs_sendctx *, | ||||
struct hn_softc *, struct vmbus_channel *, | struct hn_softc *, struct vmbus_channel *, | ||||
const void *, int); | const void *, int); | ||||
static int hn_txpkt_sglist(struct hn_tx_ring *, | static int hn_txpkt_sglist(struct hn_tx_ring *, | ||||
struct hn_txdesc *); | struct hn_txdesc *); | ||||
static int hn_txpkt_chim(struct hn_tx_ring *, | static int hn_txpkt_chim(struct hn_tx_ring *, | ||||
struct hn_txdesc *); | struct hn_txdesc *); | ||||
▲ Show 20 Lines • Show All 99 Lines • ▼ Show 20 Lines | |||||
/* Enable sorted LRO, and the depth of the per-channel mbuf queue */ | /* Enable sorted LRO, and the depth of the per-channel mbuf queue */ | ||||
#if __FreeBSD_version >= 1100095 | #if __FreeBSD_version >= 1100095 | ||||
static u_int hn_lro_mbufq_depth = 0; | static u_int hn_lro_mbufq_depth = 0; | ||||
SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN, | SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN, | ||||
&hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue"); | &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue"); | ||||
#endif | #endif | ||||
/* Packet transmission aggregation size limit */ | |||||
static int hn_tx_agg_size = -1; | |||||
SYSCTL_INT(_hw_hn, OID_AUTO, tx_agg_size, CTLFLAG_RDTUN, | |||||
&hn_tx_agg_size, 0, "Packet transmission aggregation size limit"); | |||||
/* Packet transmission aggregation count limit */ | |||||
static int hn_tx_agg_pkts = 0; | |||||
SYSCTL_INT(_hw_hn, OID_AUTO, tx_agg_pkts, CTLFLAG_RDTUN, | |||||
&hn_tx_agg_pkts, 0, "Packet transmission aggregation packet limit"); | |||||
static u_int hn_cpu_index; /* next CPU for channel */ | static u_int hn_cpu_index; /* next CPU for channel */ | ||||
static struct taskqueue *hn_tx_taskq; /* shared TX taskqueue */ | static struct taskqueue *hn_tx_taskq; /* shared TX taskqueue */ | ||||
static const uint8_t | static const uint8_t | ||||
hn_rss_key_default[NDIS_HASH_KEYSIZE_TOEPLITZ] = { | hn_rss_key_default[NDIS_HASH_KEYSIZE_TOEPLITZ] = { | ||||
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, | 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, | ||||
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, | 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, | ||||
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, | 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, | ||||
▲ Show 20 Lines • Show All 212 Lines • ▼ Show 20 Lines | #endif | ||||
if (sc->hn_rx_filter != filter) { | if (sc->hn_rx_filter != filter) { | ||||
error = hn_rndis_set_rxfilter(sc, filter); | error = hn_rndis_set_rxfilter(sc, filter); | ||||
if (!error) | if (!error) | ||||
sc->hn_rx_filter = filter; | sc->hn_rx_filter = filter; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | |||||
hn_set_txagg(struct hn_softc *sc) | |||||
{ | |||||
uint32_t size, pkts; | |||||
int i; | |||||
/* | |||||
* Setup aggregation size. | |||||
*/ | |||||
if (sc->hn_agg_size < 0) | |||||
size = UINT32_MAX; | |||||
else | |||||
size = sc->hn_agg_size; | |||||
if (sc->hn_rndis_agg_size < size) | |||||
size = sc->hn_rndis_agg_size; | |||||
if (size <= 2 * HN_PKTSIZE_MIN(sc->hn_rndis_agg_align)) { | |||||
/* Disable */ | |||||
size = 0; | |||||
pkts = 0; | |||||
goto done; | |||||
} | |||||
/* NOTE: Type of the per TX ring setting is 'int'. */ | |||||
if (size > INT_MAX) | |||||
size = INT_MAX; | |||||
/* NOTE: We only aggregate packets using chimney sending buffers. */ | |||||
if (size > (uint32_t)sc->hn_chim_szmax) | |||||
size = sc->hn_chim_szmax; | |||||
/* | |||||
* Setup aggregation packet count. | |||||
*/ | |||||
if (sc->hn_agg_pkts < 0) | |||||
pkts = UINT32_MAX; | |||||
else | |||||
pkts = sc->hn_agg_pkts; | |||||
if (sc->hn_rndis_agg_pkts < pkts) | |||||
pkts = sc->hn_rndis_agg_pkts; | |||||
if (pkts <= 1) { | |||||
/* Disable */ | |||||
size = 0; | |||||
pkts = 0; | |||||
goto done; | |||||
} | |||||
/* NOTE: Type of the per TX ring setting is 'short'. */ | |||||
if (pkts > SHRT_MAX) | |||||
pkts = SHRT_MAX; | |||||
done: | |||||
/* NOTE: Type of the per TX ring setting is 'short'. */ | |||||
if (sc->hn_rndis_agg_align > SHRT_MAX) { | |||||
/* Disable */ | |||||
size = 0; | |||||
pkts = 0; | |||||
} | |||||
if (bootverbose) { | |||||
if_printf(sc->hn_ifp, "TX agg size %u, pkts %u, align %u\n", | |||||
size, pkts, sc->hn_rndis_agg_align); | |||||
} | |||||
for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { | |||||
struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; | |||||
mtx_lock(&txr->hn_tx_lock); | |||||
txr->hn_agg_szmax = size; | |||||
txr->hn_agg_pktmax = pkts; | |||||
txr->hn_agg_align = sc->hn_rndis_agg_align; | |||||
mtx_unlock(&txr->hn_tx_lock); | |||||
} | |||||
} | |||||
static int | static int | ||||
hn_get_txswq_depth(const struct hn_tx_ring *txr) | hn_get_txswq_depth(const struct hn_tx_ring *txr) | ||||
{ | { | ||||
KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet")); | KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet")); | ||||
if (hn_tx_swq_depth < txr->hn_txdesc_cnt) | if (hn_tx_swq_depth < txr->hn_txdesc_cnt) | ||||
return txr->hn_txdesc_cnt; | return txr->hn_txdesc_cnt; | ||||
return hn_tx_swq_depth; | return hn_tx_swq_depth; | ||||
▲ Show 20 Lines • Show All 111 Lines • ▼ Show 20 Lines | hn_attach(device_t dev) | ||||
struct ifnet *ifp = NULL; | struct ifnet *ifp = NULL; | ||||
int error, ring_cnt, tx_ring_cnt; | int error, ring_cnt, tx_ring_cnt; | ||||
sc->hn_dev = dev; | sc->hn_dev = dev; | ||||
sc->hn_prichan = vmbus_get_channel(dev); | sc->hn_prichan = vmbus_get_channel(dev); | ||||
HN_LOCK_INIT(sc); | HN_LOCK_INIT(sc); | ||||
/* | /* | ||||
* Initialize these tunables once. | |||||
*/ | |||||
sc->hn_agg_size = hn_tx_agg_size; | |||||
sc->hn_agg_pkts = hn_tx_agg_pkts; | |||||
/* | |||||
* Setup taskqueue for transmission. | * Setup taskqueue for transmission. | ||||
*/ | */ | ||||
if (hn_tx_taskq == NULL) { | if (hn_tx_taskq == NULL) { | ||||
sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, | sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, | ||||
taskqueue_thread_enqueue, &sc->hn_tx_taskq); | taskqueue_thread_enqueue, &sc->hn_tx_taskq); | ||||
if (hn_bind_tx_taskq >= 0) { | if (hn_bind_tx_taskq >= 0) { | ||||
int cpu = hn_bind_tx_taskq; | int cpu = hn_bind_tx_taskq; | ||||
cpuset_t cpu_set; | cpuset_t cpu_set; | ||||
▲ Show 20 Lines • Show All 138 Lines • ▼ Show 20 Lines | #endif | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rss_ind_size", | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rss_ind_size", | ||||
CTLFLAG_RD, &sc->hn_rss_ind_size, 0, "RSS indirect entry count"); | CTLFLAG_RD, &sc->hn_rss_ind_size, 0, "RSS indirect entry count"); | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_key", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_key", | ||||
CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | ||||
hn_rss_key_sysctl, "IU", "RSS key"); | hn_rss_key_sysctl, "IU", "RSS key"); | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_ind", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rss_ind", | ||||
CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | ||||
hn_rss_ind_sysctl, "IU", "RSS indirect table"); | hn_rss_ind_sysctl, "IU", "RSS indirect table"); | ||||
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_size", | |||||
CTLFLAG_RD, &sc->hn_rndis_agg_size, 0, | |||||
"RNDIS offered packet transmission aggregation size limit"); | |||||
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_pkts", | |||||
CTLFLAG_RD, &sc->hn_rndis_agg_pkts, 0, | |||||
"RNDIS offered packet transmission aggregation count limit"); | |||||
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rndis_agg_align", | |||||
CTLFLAG_RD, &sc->hn_rndis_agg_align, 0, | |||||
"RNDIS packet transmission aggregation alignment"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_size", | |||||
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | |||||
hn_txagg_size_sysctl, "I", | |||||
"Packet transmission aggregation size, 0 -- disable, -1 -- auto"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_pkts", | |||||
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, | |||||
hn_txagg_pkts_sysctl, "I", | |||||
"Packet transmission aggregation packets, " | |||||
"0 -- disable, -1 -- auto"); | |||||
/* | /* | ||||
* Setup the ifmedia, which has been initialized earlier. | * Setup the ifmedia, which has been initialized earlier. | ||||
*/ | */ | ||||
ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO); | ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO); | ||||
/* XXX ifmedia_set really should do this for us */ | /* XXX ifmedia_set really should do this for us */ | ||||
sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media; | sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media; | ||||
▲ Show 20 Lines • Show All 234 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static __inline int | static __inline int | ||||
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) | hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) | ||||
{ | { | ||||
KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, | KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, | ||||
("put an onlist txd %#x", txd->flags)); | ("put an onlist txd %#x", txd->flags)); | ||||
KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0, | |||||
("put an onagg txd %#x", txd->flags)); | |||||
KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); | KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs)); | ||||
if (atomic_fetchadd_int(&txd->refs, -1) != 1) | if (atomic_fetchadd_int(&txd->refs, -1) != 1) | ||||
return 0; | return 0; | ||||
if (!STAILQ_EMPTY(&txd->agg_list)) { | |||||
struct hn_txdesc *tmp_txd; | |||||
while ((tmp_txd = STAILQ_FIRST(&txd->agg_list)) != NULL) { | |||||
int freed; | |||||
KASSERT(STAILQ_EMPTY(&tmp_txd->agg_list), | |||||
("resursive aggregation on aggregated txdesc")); | |||||
KASSERT((tmp_txd->flags & HN_TXD_FLAG_ONAGG), | |||||
("not aggregated txdesc")); | |||||
KASSERT((tmp_txd->flags & HN_TXD_FLAG_DMAMAP) == 0, | |||||
("aggregated txdesc uses dmamap")); | |||||
KASSERT(tmp_txd->chim_index == HN_NVS_CHIM_IDX_INVALID, | |||||
("aggregated txdesc consumes " | |||||
"chimney sending buffer")); | |||||
KASSERT(tmp_txd->chim_size == 0, | |||||
("aggregated txdesc has non-zero " | |||||
"chimney sending size")); | |||||
STAILQ_REMOVE_HEAD(&txd->agg_list, agg_link); | |||||
tmp_txd->flags &= ~HN_TXD_FLAG_ONAGG; | |||||
freed = hn_txdesc_put(txr, tmp_txd); | |||||
KASSERT(freed, ("failed to free aggregated txdesc")); | |||||
} | |||||
} | |||||
if (txd->chim_index != HN_NVS_CHIM_IDX_INVALID) { | if (txd->chim_index != HN_NVS_CHIM_IDX_INVALID) { | ||||
KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, | KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, | ||||
("chim txd uses dmamap")); | ("chim txd uses dmamap")); | ||||
hn_chim_free(txr->hn_sc, txd->chim_index); | hn_chim_free(txr->hn_sc, txd->chim_index); | ||||
txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | ||||
txd->chim_size = 0; | |||||
} else if (txd->flags & HN_TXD_FLAG_DMAMAP) { | } else if (txd->flags & HN_TXD_FLAG_DMAMAP) { | ||||
bus_dmamap_sync(txr->hn_tx_data_dtag, | bus_dmamap_sync(txr->hn_tx_data_dtag, | ||||
txd->data_dmap, BUS_DMASYNC_POSTWRITE); | txd->data_dmap, BUS_DMASYNC_POSTWRITE); | ||||
bus_dmamap_unload(txr->hn_tx_data_dtag, | bus_dmamap_unload(txr->hn_tx_data_dtag, | ||||
txd->data_dmap); | txd->data_dmap); | ||||
txd->flags &= ~HN_TXD_FLAG_DMAMAP; | txd->flags &= ~HN_TXD_FLAG_DMAMAP; | ||||
} | } | ||||
Show All 38 Lines | #else | ||||
txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); | txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); | ||||
#endif | #endif | ||||
if (txd != NULL) { | if (txd != NULL) { | ||||
#ifdef HN_USE_TXDESC_BUFRING | #ifdef HN_USE_TXDESC_BUFRING | ||||
atomic_subtract_int(&txr->hn_txdesc_avail, 1); | atomic_subtract_int(&txr->hn_txdesc_avail, 1); | ||||
#endif | #endif | ||||
KASSERT(txd->m == NULL && txd->refs == 0 && | KASSERT(txd->m == NULL && txd->refs == 0 && | ||||
STAILQ_EMPTY(&txd->agg_list) && | |||||
txd->chim_index == HN_NVS_CHIM_IDX_INVALID && | txd->chim_index == HN_NVS_CHIM_IDX_INVALID && | ||||
txd->chim_size == 0 && | |||||
(txd->flags & HN_TXD_FLAG_ONLIST) && | (txd->flags & HN_TXD_FLAG_ONLIST) && | ||||
(txd->flags & HN_TXD_FLAG_ONAGG) == 0 && | |||||
(txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("invalid txd")); | (txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("invalid txd")); | ||||
txd->flags &= ~HN_TXD_FLAG_ONLIST; | txd->flags &= ~HN_TXD_FLAG_ONLIST; | ||||
txd->refs = 1; | txd->refs = 1; | ||||
} | } | ||||
return txd; | return txd; | ||||
} | } | ||||
static __inline void | static __inline void | ||||
hn_txdesc_hold(struct hn_txdesc *txd) | hn_txdesc_hold(struct hn_txdesc *txd) | ||||
{ | { | ||||
/* 0->1 transition will never work */ | /* 0->1 transition will never work */ | ||||
KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs)); | KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs)); | ||||
atomic_add_int(&txd->refs, 1); | atomic_add_int(&txd->refs, 1); | ||||
} | } | ||||
static __inline void | |||||
hn_txdesc_agg(struct hn_txdesc *agg_txd, struct hn_txdesc *txd) | |||||
{ | |||||
KASSERT((agg_txd->flags & HN_TXD_FLAG_ONAGG) == 0, | |||||
("recursive aggregation on aggregating txdesc")); | |||||
KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0, | |||||
("already aggregated")); | |||||
KASSERT(STAILQ_EMPTY(&txd->agg_list), | |||||
("recursive aggregation on to-be-aggregated txdesc")); | |||||
txd->flags |= HN_TXD_FLAG_ONAGG; | |||||
STAILQ_INSERT_TAIL(&agg_txd->agg_list, txd, agg_link); | |||||
} | |||||
static bool | static bool | ||||
hn_tx_ring_pending(struct hn_tx_ring *txr) | hn_tx_ring_pending(struct hn_tx_ring *txr) | ||||
{ | { | ||||
bool pending = false; | bool pending = false; | ||||
#ifndef HN_USE_TXDESC_BUFRING | #ifndef HN_USE_TXDESC_BUFRING | ||||
mtx_lock_spin(&txr->hn_txlist_spin); | mtx_lock_spin(&txr->hn_txlist_spin); | ||||
if (txr->hn_txdesc_avail != txr->hn_txdesc_cnt) | if (txr->hn_txdesc_avail != txr->hn_txdesc_cnt) | ||||
▲ Show 20 Lines • Show All 95 Lines • ▼ Show 20 Lines | hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt, size_t pktsize, | ||||
pkt->rm_dataoffset += pi_size; | pkt->rm_dataoffset += pi_size; | ||||
/* Update RNDIS packet msg length */ | /* Update RNDIS packet msg length */ | ||||
pkt->rm_len += pi_size; | pkt->rm_len += pi_size; | ||||
return (pi->rm_data); | return (pi->rm_data); | ||||
} | } | ||||
static __inline int | |||||
hn_flush_txagg(struct ifnet *ifp, struct hn_tx_ring *txr) | |||||
{ | |||||
struct hn_txdesc *txd; | |||||
struct mbuf *m; | |||||
int error, pkts; | |||||
txd = txr->hn_agg_txd; | |||||
KASSERT(txd != NULL, ("no aggregate txdesc")); | |||||
/* | /* | ||||
* Since hn_txpkt() will reset this temporary stat, save | |||||
* it now, so that oerrors can be updated properly, if | |||||
* hn_txpkt() ever fails. | |||||
*/ | |||||
pkts = txr->hn_stat_pkts; | |||||
/* | |||||
* Since txd's mbuf will _not_ be freed upon hn_txpkt() | |||||
* failure, save it for later freeing, if hn_txpkt() ever | |||||
* fails. | |||||
*/ | |||||
m = txd->m; | |||||
error = hn_txpkt(ifp, txr, txd); | |||||
if (__predict_false(error)) { | |||||
/* txd is freed, but m is not. */ | |||||
m_freem(m); | |||||
txr->hn_flush_failed++; | |||||
if_inc_counter(ifp, IFCOUNTER_OERRORS, pkts); | |||||
} | |||||
/* Reset all aggregation states. */ | |||||
txr->hn_agg_txd = NULL; | |||||
txr->hn_agg_szleft = 0; | |||||
txr->hn_agg_pktleft = 0; | |||||
txr->hn_agg_prevpkt = NULL; | |||||
return (error); | |||||
} | |||||
static void * | |||||
hn_try_txagg(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, | |||||
int pktsize) | |||||
{ | |||||
void *chim; | |||||
if (txr->hn_agg_txd != NULL) { | |||||
if (txr->hn_agg_pktleft >= 1 && txr->hn_agg_szleft > pktsize) { | |||||
struct hn_txdesc *agg_txd = txr->hn_agg_txd; | |||||
struct rndis_packet_msg *pkt = txr->hn_agg_prevpkt; | |||||
int olen; | |||||
/* | |||||
* Update the previous RNDIS packet's total length, | |||||
* it can be increased due to the mandatory alignment | |||||
* padding for this RNDIS packet. And update the | |||||
* aggregating txdesc's chimney sending buffer size | |||||
* accordingly. | |||||
* | |||||
* XXX | |||||
* Zero-out the padding, as required by the RNDIS spec. | |||||
*/ | |||||
olen = pkt->rm_len; | |||||
pkt->rm_len = roundup2(olen, txr->hn_agg_align); | |||||
agg_txd->chim_size += pkt->rm_len - olen; | |||||
/* Link this txdesc to the parent. */ | |||||
hn_txdesc_agg(agg_txd, txd); | |||||
chim = (uint8_t *)pkt + pkt->rm_len; | |||||
/* Save the current packet for later fixup. */ | |||||
txr->hn_agg_prevpkt = chim; | |||||
txr->hn_agg_pktleft--; | |||||
txr->hn_agg_szleft -= pktsize; | |||||
if (txr->hn_agg_szleft <= | |||||
HN_PKTSIZE_MIN(txr->hn_agg_align)) { | |||||
/* | |||||
* Probably can't aggregate more packets, | |||||
* flush this aggregating txdesc proactively. | |||||
*/ | |||||
txr->hn_agg_pktleft = 0; | |||||
} | |||||
/* Done! */ | |||||
return (chim); | |||||
} | |||||
hn_flush_txagg(ifp, txr); | |||||
} | |||||
KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc")); | |||||
txr->hn_tx_chimney_tried++; | |||||
txd->chim_index = hn_chim_alloc(txr->hn_sc); | |||||
if (txd->chim_index == HN_NVS_CHIM_IDX_INVALID) | |||||
return (NULL); | |||||
txr->hn_tx_chimney++; | |||||
chim = txr->hn_sc->hn_chim + | |||||
(txd->chim_index * txr->hn_sc->hn_chim_szmax); | |||||
if (txr->hn_agg_pktmax > 1 && | |||||
txr->hn_agg_szmax > pktsize + HN_PKTSIZE_MIN(txr->hn_agg_align)) { | |||||
txr->hn_agg_txd = txd; | |||||
txr->hn_agg_pktleft = txr->hn_agg_pktmax - 1; | |||||
txr->hn_agg_szleft = txr->hn_agg_szmax - pktsize; | |||||
txr->hn_agg_prevpkt = chim; | |||||
} | |||||
return (chim); | |||||
} | |||||
/* | |||||
* NOTE: | * NOTE: | ||||
* If this function fails, then both txd and m_head0 will be freed. | * If this function fails, then both txd and m_head0 will be freed. | ||||
*/ | */ | ||||
static int | static int | ||||
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0) | hn_encap(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd, | ||||
struct mbuf **m_head0) | |||||
{ | { | ||||
bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; | bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; | ||||
int error, nsegs, i; | int error, nsegs, i; | ||||
struct mbuf *m_head = *m_head0; | struct mbuf *m_head = *m_head0; | ||||
struct rndis_packet_msg *pkt; | struct rndis_packet_msg *pkt; | ||||
uint32_t *pi_data; | uint32_t *pi_data; | ||||
void *chim = NULL; | void *chim = NULL; | ||||
int pktlen; | int pkt_hlen, pkt_size; | ||||
pkt = txd->rndis_pkt; | pkt = txd->rndis_pkt; | ||||
if (m_head->m_pkthdr.len + HN_RNDIS_PKT_LEN < txr->hn_chim_size) { | pkt_size = HN_PKTSIZE(m_head, txr->hn_agg_align); | ||||
/* | if (pkt_size < txr->hn_chim_size) { | ||||
* This packet is small enough to fit into a chimney sending | chim = hn_try_txagg(ifp, txr, txd, pkt_size); | ||||
* buffer. Try allocating one chimney sending buffer now. | if (chim != NULL) | ||||
*/ | |||||
txr->hn_tx_chimney_tried++; | |||||
txd->chim_index = hn_chim_alloc(txr->hn_sc); | |||||
if (txd->chim_index != HN_NVS_CHIM_IDX_INVALID) { | |||||
chim = txr->hn_sc->hn_chim + | |||||
(txd->chim_index * txr->hn_sc->hn_chim_szmax); | |||||
/* | |||||
* Directly fill the chimney sending buffer w/ the | |||||
* RNDIS packet message. | |||||
*/ | |||||
pkt = chim; | pkt = chim; | ||||
} else { | |||||
if (txr->hn_agg_txd != NULL) | |||||
hn_flush_txagg(ifp, txr); | |||||
} | } | ||||
} | |||||
pkt->rm_type = REMOTE_NDIS_PACKET_MSG; | pkt->rm_type = REMOTE_NDIS_PACKET_MSG; | ||||
pkt->rm_len = sizeof(*pkt) + m_head->m_pkthdr.len; | pkt->rm_len = sizeof(*pkt) + m_head->m_pkthdr.len; | ||||
pkt->rm_dataoffset = sizeof(*pkt); | pkt->rm_dataoffset = sizeof(*pkt); | ||||
pkt->rm_datalen = m_head->m_pkthdr.len; | pkt->rm_datalen = m_head->m_pkthdr.len; | ||||
pkt->rm_oobdataoffset = 0; | |||||
pkt->rm_oobdatalen = 0; | |||||
pkt->rm_oobdataelements = 0; | |||||
pkt->rm_pktinfooffset = sizeof(*pkt); | pkt->rm_pktinfooffset = sizeof(*pkt); | ||||
pkt->rm_pktinfolen = 0; | pkt->rm_pktinfolen = 0; | ||||
pkt->rm_vchandle = 0; | |||||
pkt->rm_reserved = 0; | |||||
if (txr->hn_tx_flags & HN_TX_FLAG_HASHVAL) { | if (txr->hn_tx_flags & HN_TX_FLAG_HASHVAL) { | ||||
/* | /* | ||||
* Set the hash value for this packet, so that the host could | * Set the hash value for this packet, so that the host could | ||||
* dispatch the TX done event for this packet back to this TX | * dispatch the TX done event for this packet back to this TX | ||||
* ring's channel. | * ring's channel. | ||||
*/ | */ | ||||
pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, | pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN, | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | #endif /* INET6 || INET */ | ||||
if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) | if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) | ||||
*pi_data |= NDIS_TXCSUM_INFO_TCPCS; | *pi_data |= NDIS_TXCSUM_INFO_TCPCS; | ||||
else if (m_head->m_pkthdr.csum_flags & | else if (m_head->m_pkthdr.csum_flags & | ||||
(CSUM_IP_UDP | CSUM_IP6_UDP)) | (CSUM_IP_UDP | CSUM_IP6_UDP)) | ||||
*pi_data |= NDIS_TXCSUM_INFO_UDPCS; | *pi_data |= NDIS_TXCSUM_INFO_UDPCS; | ||||
} | } | ||||
pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen; | pkt_hlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen; | ||||
/* Convert RNDIS packet message offsets */ | /* Convert RNDIS packet message offsets */ | ||||
pkt->rm_dataoffset = hn_rndis_pktmsg_offset(pkt->rm_dataoffset); | pkt->rm_dataoffset = hn_rndis_pktmsg_offset(pkt->rm_dataoffset); | ||||
pkt->rm_pktinfooffset = hn_rndis_pktmsg_offset(pkt->rm_pktinfooffset); | pkt->rm_pktinfooffset = hn_rndis_pktmsg_offset(pkt->rm_pktinfooffset); | ||||
/* | /* | ||||
* Fast path: Chimney sending. | * Fast path: Chimney sending. | ||||
*/ | */ | ||||
if (chim != NULL) { | if (chim != NULL) { | ||||
KASSERT(txd->chim_index != HN_NVS_CHIM_IDX_INVALID, | struct hn_txdesc *tgt_txd = txd; | ||||
("chimney buffer is not used")); | |||||
KASSERT(pkt == chim, ("RNDIS pkt not in chimney buffer")); | |||||
if (txr->hn_agg_txd != NULL) { | |||||
tgt_txd = txr->hn_agg_txd; | |||||
#ifdef INVARIANTS | |||||
*m_head0 = NULL; | |||||
#endif | |||||
} | |||||
KASSERT(pkt == chim, | |||||
("RNDIS pkt not in chimney sending buffer")); | |||||
KASSERT(tgt_txd->chim_index != HN_NVS_CHIM_IDX_INVALID, | |||||
("chimney sending buffer is not used")); | |||||
tgt_txd->chim_size += pkt->rm_len; | |||||
m_copydata(m_head, 0, m_head->m_pkthdr.len, | m_copydata(m_head, 0, m_head->m_pkthdr.len, | ||||
((uint8_t *)chim) + pktlen); | ((uint8_t *)chim) + pkt_hlen); | ||||
txd->chim_size = pkt->rm_len; | |||||
txr->hn_gpa_cnt = 0; | txr->hn_gpa_cnt = 0; | ||||
txr->hn_tx_chimney++; | |||||
txr->hn_sendpkt = hn_txpkt_chim; | txr->hn_sendpkt = hn_txpkt_chim; | ||||
goto done; | goto done; | ||||
} | } | ||||
KASSERT(txr->hn_agg_txd == NULL, ("aggregating sglist txdesc")); | |||||
KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID, | KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID, | ||||
("chimney buffer is used")); | ("chimney buffer is used")); | ||||
KASSERT(pkt == txd->rndis_pkt, ("RNDIS pkt not in txdesc")); | KASSERT(pkt == txd->rndis_pkt, ("RNDIS pkt not in txdesc")); | ||||
error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); | error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); | ||||
if (error) { | if (__predict_false(error)) { | ||||
int freed; | int freed; | ||||
/* | /* | ||||
* This mbuf is not linked w/ the txd yet, so free it now. | * This mbuf is not linked w/ the txd yet, so free it now. | ||||
*/ | */ | ||||
m_freem(m_head); | m_freem(m_head); | ||||
*m_head0 = NULL; | *m_head0 = NULL; | ||||
freed = hn_txdesc_put(txr, txd); | freed = hn_txdesc_put(txr, txd); | ||||
KASSERT(freed != 0, | KASSERT(freed != 0, | ||||
("fail to free txd upon txdma error")); | ("fail to free txd upon txdma error")); | ||||
txr->hn_txdma_failed++; | txr->hn_txdma_failed++; | ||||
if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1); | if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); | ||||
return error; | return error; | ||||
} | } | ||||
*m_head0 = m_head; | *m_head0 = m_head; | ||||
/* +1 RNDIS packet message */ | /* +1 RNDIS packet message */ | ||||
txr->hn_gpa_cnt = nsegs + 1; | txr->hn_gpa_cnt = nsegs + 1; | ||||
/* send packet with page buffer */ | /* send packet with page buffer */ | ||||
txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr); | txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr); | ||||
txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK; | txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK; | ||||
txr->hn_gpa[0].gpa_len = pktlen; | txr->hn_gpa[0].gpa_len = pkt_hlen; | ||||
/* | /* | ||||
* Fill the page buffers with mbuf info after the page | * Fill the page buffers with mbuf info after the page | ||||
* buffer for RNDIS packet message. | * buffer for RNDIS packet message. | ||||
*/ | */ | ||||
for (i = 0; i < nsegs; ++i) { | for (i = 0; i < nsegs; ++i) { | ||||
struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1]; | struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1]; | ||||
gpa->gpa_page = atop(segs[i].ds_addr); | gpa->gpa_page = atop(segs[i].ds_addr); | ||||
gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK; | gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK; | ||||
gpa->gpa_len = segs[i].ds_len; | gpa->gpa_len = segs[i].ds_len; | ||||
} | } | ||||
txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | ||||
txd->chim_size = 0; | txd->chim_size = 0; | ||||
txr->hn_sendpkt = hn_txpkt_sglist; | txr->hn_sendpkt = hn_txpkt_sglist; | ||||
done: | done: | ||||
txd->m = m_head; | txd->m = m_head; | ||||
/* Set the completion routine */ | /* Set the completion routine */ | ||||
hn_nvs_sendctx_init(&txd->send_ctx, hn_txpkt_done, txd); | hn_nvs_sendctx_init(&txd->send_ctx, hn_txpkt_done, txd); | ||||
/* Update temporary stats for later use. */ | |||||
txr->hn_stat_pkts++; | |||||
txr->hn_stat_size += m_head->m_pkthdr.len; | |||||
if (m_head->m_flags & M_MCAST) | |||||
txr->hn_stat_mcasts++; | |||||
return 0; | return 0; | ||||
} | } | ||||
/* | /* | ||||
* NOTE: | * NOTE: | ||||
* If this function fails, then txd will be freed, but the mbuf | * If this function fails, then txd will be freed, but the mbuf | ||||
* associated w/ the txd will _not_ be freed. | * associated w/ the txd will _not_ be freed. | ||||
*/ | */ | ||||
static int | static int | ||||
hn_txpkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd) | hn_txpkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd) | ||||
{ | { | ||||
int error, send_failed = 0; | int error, send_failed = 0; | ||||
again: | again: | ||||
/* | /* | ||||
* Make sure that txd is not freed before ETHER_BPF_MTAP. | * Make sure that this txd and any aggregated txds are not freed | ||||
* before ETHER_BPF_MTAP. | |||||
*/ | */ | ||||
hn_txdesc_hold(txd); | hn_txdesc_hold(txd); | ||||
error = txr->hn_sendpkt(txr, txd); | error = txr->hn_sendpkt(txr, txd); | ||||
if (!error) { | if (!error) { | ||||
if (bpf_peers_present(ifp->if_bpf)) { | |||||
const struct hn_txdesc *tmp_txd; | |||||
ETHER_BPF_MTAP(ifp, txd->m); | ETHER_BPF_MTAP(ifp, txd->m); | ||||
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); | STAILQ_FOREACH(tmp_txd, &txd->agg_list, agg_link) | ||||
ETHER_BPF_MTAP(ifp, tmp_txd->m); | |||||
} | |||||
if_inc_counter(ifp, IFCOUNTER_OPACKETS, txr->hn_stat_pkts); | |||||
#ifdef HN_IFSTART_SUPPORT | #ifdef HN_IFSTART_SUPPORT | ||||
if (!hn_use_if_start) | if (!hn_use_if_start) | ||||
#endif | #endif | ||||
{ | { | ||||
if_inc_counter(ifp, IFCOUNTER_OBYTES, | if_inc_counter(ifp, IFCOUNTER_OBYTES, | ||||
txd->m->m_pkthdr.len); | txr->hn_stat_size); | ||||
if (txd->m->m_flags & M_MCAST) | if (txr->hn_stat_mcasts != 0) { | ||||
if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); | if_inc_counter(ifp, IFCOUNTER_OMCASTS, | ||||
txr->hn_stat_mcasts); | |||||
} | } | ||||
txr->hn_pkts++; | |||||
} | } | ||||
txr->hn_pkts += txr->hn_stat_pkts; | |||||
txr->hn_sends++; | |||||
} | |||||
hn_txdesc_put(txr, txd); | hn_txdesc_put(txr, txd); | ||||
if (__predict_false(error)) { | if (__predict_false(error)) { | ||||
int freed; | int freed; | ||||
/* | /* | ||||
* This should "really rarely" happen. | * This should "really rarely" happen. | ||||
* | * | ||||
Show All 22 Lines | if (__predict_false(error)) { | ||||
*/ | */ | ||||
txd->m = NULL; | txd->m = NULL; | ||||
freed = hn_txdesc_put(txr, txd); | freed = hn_txdesc_put(txr, txd); | ||||
KASSERT(freed != 0, | KASSERT(freed != 0, | ||||
("fail to free txd upon send error")); | ("fail to free txd upon send error")); | ||||
txr->hn_send_failed++; | txr->hn_send_failed++; | ||||
} | } | ||||
return error; | |||||
/* Reset temporary stats, after this sending is done. */ | |||||
txr->hn_stat_size = 0; | |||||
txr->hn_stat_pkts = 0; | |||||
txr->hn_stat_mcasts = 0; | |||||
return (error); | |||||
} | } | ||||
/* | /* | ||||
* Append the specified data to the indicated mbuf chain, | * Append the specified data to the indicated mbuf chain, | ||||
* Extend the mbuf chain if the new data does not fit in | * Extend the mbuf chain if the new data does not fit in | ||||
* existing space. | * existing space. | ||||
* | * | ||||
* This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c. | * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c. | ||||
▲ Show 20 Lines • Show All 767 Lines • ▼ Show 20 Lines | for (i = 0; i < sc->hn_tx_ring_inuse; ++i) { | ||||
*((int *)((uint8_t *)txr + ofs)) = conf; | *((int *)((uint8_t *)txr + ofs)) = conf; | ||||
} | } | ||||
HN_UNLOCK(sc); | HN_UNLOCK(sc); | ||||
return 0; | return 0; | ||||
} | } | ||||
static int | static int | ||||
hn_txagg_size_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int error, size; | |||||
size = sc->hn_agg_size; | |||||
error = sysctl_handle_int(oidp, &size, 0, req); | |||||
if (error || req->newptr == NULL) | |||||
return (error); | |||||
HN_LOCK(sc); | |||||
sc->hn_agg_size = size; | |||||
hn_set_txagg(sc); | |||||
HN_UNLOCK(sc); | |||||
return (0); | |||||
} | |||||
static int | |||||
hn_txagg_pkts_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int error, pkts; | |||||
pkts = sc->hn_agg_pkts; | |||||
error = sysctl_handle_int(oidp, &pkts, 0, req); | |||||
if (error || req->newptr == NULL) | |||||
return (error); | |||||
HN_LOCK(sc); | |||||
sc->hn_agg_pkts = pkts; | |||||
hn_set_txagg(sc); | |||||
HN_UNLOCK(sc); | |||||
return (0); | |||||
} | |||||
static int | |||||
hn_txagg_pktmax_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int pkts; | |||||
pkts = sc->hn_tx_ring[0].hn_agg_pktmax; | |||||
return (sysctl_handle_int(oidp, &pkts, 0, req)); | |||||
} | |||||
static int | |||||
hn_txagg_align_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int align; | |||||
align = sc->hn_tx_ring[0].hn_agg_align; | |||||
return (sysctl_handle_int(oidp, &align, 0, req)); | |||||
} | |||||
static int | |||||
hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS) | hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct hn_softc *sc = arg1; | struct hn_softc *sc = arg1; | ||||
char verstr[16]; | char verstr[16]; | ||||
snprintf(verstr, sizeof(verstr), "%u.%u", | snprintf(verstr, sizeof(verstr), "%u.%u", | ||||
HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver), | HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver), | ||||
HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver)); | HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver)); | ||||
▲ Show 20 Lines • Show All 526 Lines • ▼ Show 20 Lines | if (error) { | ||||
return error; | return error; | ||||
} | } | ||||
for (i = 0; i < txr->hn_txdesc_cnt; ++i) { | for (i = 0; i < txr->hn_txdesc_cnt; ++i) { | ||||
struct hn_txdesc *txd = &txr->hn_txdesc[i]; | struct hn_txdesc *txd = &txr->hn_txdesc[i]; | ||||
txd->txr = txr; | txd->txr = txr; | ||||
txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | txd->chim_index = HN_NVS_CHIM_IDX_INVALID; | ||||
STAILQ_INIT(&txd->agg_list); | |||||
/* | /* | ||||
* Allocate and load RNDIS packet message. | * Allocate and load RNDIS packet message. | ||||
*/ | */ | ||||
error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, | error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, | ||||
(void **)&txd->rndis_pkt, | (void **)&txd->rndis_pkt, | ||||
BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, | BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, | ||||
&txd->rndis_pkt_dmap); | &txd->rndis_pkt_dmap); | ||||
▲ Show 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | #endif | ||||
{ | { | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive", | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive", | ||||
CTLFLAG_RD, &txr->hn_oactive, 0, | CTLFLAG_RD, &txr->hn_oactive, 0, | ||||
"over active"); | "over active"); | ||||
} | } | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets", | ||||
CTLFLAG_RW, &txr->hn_pkts, | CTLFLAG_RW, &txr->hn_pkts, | ||||
"# of packets transmitted"); | "# of packets transmitted"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "sends", | |||||
CTLFLAG_RW, &txr->hn_sends, "# of sends"); | |||||
} | } | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
static void | static void | ||||
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) | hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | hn_create_tx_data(struct hn_softc *sc, int ring_cnt) | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", | ||||
CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | ||||
__offsetof(struct hn_tx_ring, hn_send_failed), | __offsetof(struct hn_tx_ring, hn_send_failed), | ||||
hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); | hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", | ||||
CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | ||||
__offsetof(struct hn_tx_ring, hn_txdma_failed), | __offsetof(struct hn_tx_ring, hn_txdma_failed), | ||||
hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure"); | hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure"); | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_flush_failed", | |||||
CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | |||||
__offsetof(struct hn_tx_ring, hn_flush_failed), | |||||
hn_tx_stat_ulong_sysctl, "LU", | |||||
"# of packet transmission aggregation flush failure"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", | ||||
CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | ||||
__offsetof(struct hn_tx_ring, hn_tx_collapsed), | __offsetof(struct hn_tx_ring, hn_tx_collapsed), | ||||
hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); | hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); | ||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", | ||||
CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, | ||||
__offsetof(struct hn_tx_ring, hn_tx_chimney), | __offsetof(struct hn_tx_ring, hn_tx_chimney), | ||||
hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); | hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); | ||||
Show All 20 Lines | SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx", | ||||
__offsetof(struct hn_tx_ring, hn_sched_tx), | __offsetof(struct hn_tx_ring, hn_sched_tx), | ||||
hn_tx_conf_int_sysctl, "I", | hn_tx_conf_int_sysctl, "I", | ||||
"Always schedule transmission " | "Always schedule transmission " | ||||
"instead of doing direct transmission"); | "instead of doing direct transmission"); | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt", | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt", | ||||
CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings"); | CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings"); | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse", | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse", | ||||
CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings"); | CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings"); | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "agg_szmax", | |||||
CTLFLAG_RD, &sc->hn_tx_ring[0].hn_agg_szmax, 0, | |||||
"Applied packet transmission aggregation size"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_pktmax", | |||||
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, | |||||
hn_txagg_pktmax_sysctl, "I", | |||||
"Applied packet transmission aggregation packets"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "agg_align", | |||||
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, | |||||
hn_txagg_align_sysctl, "I", | |||||
"Applied packet transmission aggregation alignment"); | |||||
return 0; | return 0; | ||||
} | } | ||||
static void | static void | ||||
hn_set_chim_size(struct hn_softc *sc, int chim_size) | hn_set_chim_size(struct hn_softc *sc, int chim_size) | ||||
{ | { | ||||
int i; | int i; | ||||
▲ Show 20 Lines • Show All 103 Lines • ▼ Show 20 Lines | hn_start_taskfunc(void *xtxr, int pending __unused) | ||||
mtx_unlock(&txr->hn_tx_lock); | mtx_unlock(&txr->hn_tx_lock); | ||||
} | } | ||||
/*
 * Drain the if_start send queue of the first (and only, in if_start
 * mode) TX ring, encapsulating and transmitting packets, with optional
 * transmission aggregation.
 *
 * 'len' > 0 caps the per-packet length handled inline; a larger packet
 * is put back and 1 is returned so the caller reschedules the remaining
 * work onto the TX taskqueue.  Returns 0 otherwise.
 *
 * Called with txr->hn_tx_lock held.
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	int sched = 0;

	KASSERT(hn_use_if_start,
	    ("hn_start_locked is called, when if_start is disabled"));
	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
	KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc"));

	if (__predict_false(txr->hn_suspended))
		return (0);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (0);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		struct hn_txdesc *txd;
		struct mbuf *m_head;
		int error;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * following up packets) to tx taskqueue.
			 */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			sched = 1;
			break;
		}

#if defined(INET6) || defined(INET)
		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
			/* Normalize TSO mbufs; on failure the mbuf is gone. */
			m_head = hn_tso_fixup(m_head);
			if (__predict_false(m_head == NULL)) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
		}
#endif

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			/* Out of descriptors: back off until completions. */
			txr->hn_no_txdescs++;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(ifp, txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			KASSERT(txr->hn_agg_txd == NULL,
			    ("encap failed w/ pending aggregating txdesc"));
			continue;
		}

		if (txr->hn_agg_pktleft == 0) {
			/* Aggregation budget exhausted (or disabled). */
			if (txr->hn_agg_txd != NULL) {
				/*
				 * The packet was folded into the pending
				 * aggregating txdesc; flush the whole batch.
				 */
				KASSERT(m_head == NULL,
				    ("pending mbuf for aggregating txdesc"));
				error = hn_flush_txagg(ifp, txr);
				if (__predict_false(error)) {
					atomic_set_int(&ifp->if_drv_flags,
					    IFF_DRV_OACTIVE);
					break;
				}
			} else {
				/* Non-aggregated: send this txdesc directly. */
				KASSERT(m_head != NULL, ("mbuf was freed"));
				error = hn_txpkt(ifp, txr, txd);
				if (__predict_false(error)) {
					/* txd is freed, but m_head is not */
					IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
					atomic_set_int(&ifp->if_drv_flags,
					    IFF_DRV_OACTIVE);
					break;
				}
			}
		}
#ifdef INVARIANTS
		else {
			/* Packet was queued onto the aggregating txdesc. */
			KASSERT(txr->hn_agg_txd != NULL,
			    ("no aggregating txdesc"));
			KASSERT(m_head == NULL,
			    ("pending mbuf for aggregating txdesc"));
		}
#endif
	}

	/* Flush pending aggregated transmission. */
	if (txr->hn_agg_txd != NULL)
		hn_flush_txagg(ifp, txr);
	return (sched);
}
static void | static void | ||||
hn_start(struct ifnet *ifp) | hn_start(struct ifnet *ifp) | ||||
{ | { | ||||
struct hn_softc *sc = ifp->if_softc; | struct hn_softc *sc = ifp->if_softc; | ||||
struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; | struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; | ||||
if (txr->hn_sched_tx) | if (txr->hn_sched_tx) | ||||
goto do_sched; | goto do_sched; | ||||
▲ Show 20 Lines • Show All 58 Lines • ▼ Show 20 Lines | |||||
#endif /* HN_IFSTART_SUPPORT */ | #endif /* HN_IFSTART_SUPPORT */ | ||||
/*
 * Drain this TX ring's multiqueue buf_ring (if_transmit mode),
 * encapsulating and transmitting packets, with optional transmission
 * aggregation.
 *
 * 'len' > 0 caps the per-packet length handled inline; a larger packet
 * is put back and 1 is returned so the caller reschedules the remaining
 * work onto the TX taskqueue.  Returns 0 otherwise.
 *
 * Called with txr->hn_tx_lock held.
 */
static int
hn_xmit(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_head;
	int sched = 0;

	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
#ifdef HN_IFSTART_SUPPORT
	KASSERT(hn_use_if_start == 0,
	    ("hn_xmit is called, when if_start is enabled"));
#endif
	KASSERT(txr->hn_agg_txd == NULL, ("lingering aggregating txdesc"));

	if (__predict_false(txr->hn_suspended))
		return (0);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
		return (0);

	/* Peek/advance so the mbuf stays queued until it is committed. */
	while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
		struct hn_txdesc *txd;
		int error;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * following up packets) to tx taskqueue.
			 */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			sched = 1;
			break;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			/* Out of descriptors: back off until completions. */
			txr->hn_no_txdescs++;
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		error = hn_encap(ifp, txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed; discard */
			KASSERT(txr->hn_agg_txd == NULL,
			    ("encap failed w/ pending aggregating txdesc"));
			drbr_advance(ifp, txr->hn_mbuf_br);
			continue;
		}

		if (txr->hn_agg_pktleft == 0) {
			/* Aggregation budget exhausted (or disabled). */
			if (txr->hn_agg_txd != NULL) {
				/*
				 * The packet was folded into the pending
				 * aggregating txdesc; flush the whole batch.
				 */
				KASSERT(m_head == NULL,
				    ("pending mbuf for aggregating txdesc"));
				error = hn_flush_txagg(ifp, txr);
				if (__predict_false(error)) {
					txr->hn_oactive = 1;
					break;
				}
			} else {
				/* Non-aggregated: send this txdesc directly. */
				KASSERT(m_head != NULL, ("mbuf was freed"));
				error = hn_txpkt(ifp, txr, txd);
				if (__predict_false(error)) {
					/* txd is freed, but m_head is not */
					drbr_putback(ifp, txr->hn_mbuf_br,
					    m_head);
					txr->hn_oactive = 1;
					break;
				}
			}
		}
#ifdef INVARIANTS
		else {
			/* Packet was queued onto the aggregating txdesc. */
			KASSERT(txr->hn_agg_txd != NULL,
			    ("no aggregating txdesc"));
			KASSERT(m_head == NULL,
			    ("pending mbuf for aggregating txdesc"));
		}
#endif

		/* Sent */
		drbr_advance(ifp, txr->hn_mbuf_br);
	}

	/* Flush pending aggregated transmission. */
	if (txr->hn_agg_txd != NULL)
		hn_flush_txagg(ifp, txr);
	return (sched);
}
static int | static int | ||||
hn_transmit(struct ifnet *ifp, struct mbuf *m) | hn_transmit(struct ifnet *ifp, struct mbuf *m) | ||||
{ | { | ||||
struct hn_softc *sc = ifp->if_softc; | struct hn_softc *sc = ifp->if_softc; | ||||
struct hn_tx_ring *txr; | struct hn_tx_ring *txr; | ||||
int error, idx = 0; | int error, idx = 0; | ||||
▲ Show 20 Lines • Show All 460 Lines • ▼ Show 20 Lines | back: | ||||
hn_set_ring_inuse(sc, nchan); | hn_set_ring_inuse(sc, nchan); | ||||
/* | /* | ||||
* Attach the sub-channels, if any. | * Attach the sub-channels, if any. | ||||
*/ | */ | ||||
error = hn_attach_subchans(sc); | error = hn_attach_subchans(sc); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* | |||||
* Fixup transmission aggregation setup. | |||||
*/ | |||||
hn_set_txagg(sc); | |||||
sc->hn_flags |= HN_FLAG_SYNTH_ATTACHED; | sc->hn_flags |= HN_FLAG_SYNTH_ATTACHED; | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* NOTE: | * NOTE: | ||||
* The interface must have been suspended though hn_suspend(), before | * The interface must have been suspended though hn_suspend(), before | ||||
▲ Show 20 Lines • Show All 781 Lines • Show Last 20 Lines |