Changeset View
Changeset View
Standalone View
Standalone View
head/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
Show First 20 Lines • Show All 269 Lines • ▼ Show 20 Lines | |||||
static void hn_start(struct ifnet *ifp); | static void hn_start(struct ifnet *ifp); | ||||
static void hn_start_txeof(struct ifnet *ifp); | static void hn_start_txeof(struct ifnet *ifp); | ||||
static int hn_ifmedia_upd(struct ifnet *ifp); | static int hn_ifmedia_upd(struct ifnet *ifp); | ||||
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); | static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); | ||||
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); | static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); | ||||
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); | |||||
static int hn_check_iplen(const struct mbuf *, int); | static int hn_check_iplen(const struct mbuf *, int); | ||||
static int hn_create_tx_ring(struct hn_softc *sc); | static int hn_create_tx_ring(struct hn_softc *sc); | ||||
static void hn_destroy_tx_ring(struct hn_softc *sc); | static void hn_destroy_tx_ring(struct hn_softc *sc); | ||||
static void hn_start_taskfunc(void *xsc, int pending); | static void hn_start_taskfunc(void *xsc, int pending); | ||||
static void hn_txeof_taskfunc(void *xsc, int pending); | static void hn_txeof_taskfunc(void *xsc, int pending); | ||||
static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **); | static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **); | ||||
static void hn_create_rx_data(struct hn_softc *sc); | |||||
static void hn_destroy_rx_data(struct hn_softc *sc); | |||||
/*
 * ifmedia "set" callback.  The synthetic NIC has no selectable media,
 * so any attempt to change it is rejected outright.
 */
static int
hn_ifmedia_upd(struct ifnet *ifp)
{

	(void)ifp;		/* media is fixed; nothing to program */
	return EOPNOTSUPP;
}
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | netvsc_attach(device_t dev) | ||||
int unit = device_get_unit(dev); | int unit = device_get_unit(dev); | ||||
struct ifnet *ifp = NULL; | struct ifnet *ifp = NULL; | ||||
struct sysctl_oid_list *child; | struct sysctl_oid_list *child; | ||||
struct sysctl_ctx_list *ctx; | struct sysctl_ctx_list *ctx; | ||||
int error; | int error; | ||||
#if __FreeBSD_version >= 1100045 | #if __FreeBSD_version >= 1100045 | ||||
int tso_maxlen; | int tso_maxlen; | ||||
#endif | #endif | ||||
#if defined(INET) || defined(INET6) | |||||
#if __FreeBSD_version >= 1100095 | |||||
int lroent_cnt; | |||||
#endif | |||||
#endif | |||||
sc = device_get_softc(dev); | sc = device_get_softc(dev); | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
bzero(sc, sizeof(hn_softc_t)); | bzero(sc, sizeof(hn_softc_t)); | ||||
sc->hn_unit = unit; | sc->hn_unit = unit; | ||||
sc->hn_dev = dev; | sc->hn_dev = dev; | ||||
sc->hn_direct_tx_size = hn_direct_tx_size; | sc->hn_direct_tx_size = hn_direct_tx_size; | ||||
if (hn_trust_hosttcp) | |||||
sc->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; | |||||
if (hn_trust_hostudp) | |||||
sc->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; | |||||
if (hn_trust_hostip) | |||||
sc->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; | |||||
if (hn_tx_taskq == NULL) { | if (hn_tx_taskq == NULL) { | ||||
sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, | sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, | ||||
taskqueue_thread_enqueue, &sc->hn_tx_taskq); | taskqueue_thread_enqueue, &sc->hn_tx_taskq); | ||||
taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", | taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", | ||||
device_get_nameunit(dev)); | device_get_nameunit(dev)); | ||||
} else { | } else { | ||||
sc->hn_tx_taskq = hn_tx_taskq; | sc->hn_tx_taskq = hn_tx_taskq; | ||||
} | } | ||||
TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc); | TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc); | ||||
TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc); | TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc); | ||||
error = hn_create_tx_ring(sc); | error = hn_create_tx_ring(sc); | ||||
if (error) | if (error) | ||||
goto failed; | goto failed; | ||||
NV_LOCK_INIT(sc, "NetVSCLock"); | NV_LOCK_INIT(sc, "NetVSCLock"); | ||||
sc->hn_dev_obj = device_ctx; | sc->hn_dev_obj = device_ctx; | ||||
ifp = sc->hn_ifp = if_alloc(IFT_ETHER); | ifp = sc->hn_ifp = if_alloc(IFT_ETHER); | ||||
ifp->if_softc = sc; | ifp->if_softc = sc; | ||||
hn_create_rx_data(sc); | |||||
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | ||||
ifp->if_dunit = unit; | ifp->if_dunit = unit; | ||||
ifp->if_dname = NETVSC_DEVNAME; | ifp->if_dname = NETVSC_DEVNAME; | ||||
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | ||||
ifp->if_ioctl = hn_ioctl; | ifp->if_ioctl = hn_ioctl; | ||||
ifp->if_start = hn_start; | ifp->if_start = hn_start; | ||||
ifp->if_init = hn_ifinit; | ifp->if_init = hn_ifinit; | ||||
Show All 29 Lines | #endif | ||||
error = hv_rf_on_device_add(device_ctx, &device_info); | error = hv_rf_on_device_add(device_ctx, &device_info); | ||||
if (error) | if (error) | ||||
goto failed; | goto failed; | ||||
if (device_info.link_state == 0) { | if (device_info.link_state == 0) { | ||||
sc->hn_carrier = 1; | sc->hn_carrier = 1; | ||||
} | } | ||||
#if defined(INET) || defined(INET6) | |||||
#if __FreeBSD_version >= 1100095 | |||||
lroent_cnt = hn_lro_entry_count; | |||||
if (lroent_cnt < TCP_LRO_ENTRIES) | |||||
lroent_cnt = TCP_LRO_ENTRIES; | |||||
tcp_lro_init_args(&sc->hn_lro, ifp, lroent_cnt, 0); | |||||
device_printf(dev, "LRO: entry count %d\n", lroent_cnt); | |||||
#else | |||||
tcp_lro_init(&sc->hn_lro); | |||||
/* Driver private LRO settings */ | |||||
sc->hn_lro.ifp = ifp; | |||||
#endif | |||||
sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; | |||||
sc->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; | |||||
#endif /* INET || INET6 */ | |||||
#if __FreeBSD_version >= 1100045 | #if __FreeBSD_version >= 1100045 | ||||
tso_maxlen = hn_tso_maxlen; | tso_maxlen = hn_tso_maxlen; | ||||
if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET) | if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET) | ||||
tso_maxlen = IP_MAXPACKET; | tso_maxlen = IP_MAXPACKET; | ||||
ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX; | ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX; | ||||
ifp->if_hw_tsomaxsegsize = PAGE_SIZE; | ifp->if_hw_tsomaxsegsize = PAGE_SIZE; | ||||
ifp->if_hw_tsomax = tso_maxlen - | ifp->if_hw_tsomax = tso_maxlen - | ||||
Show All 18 Lines | #endif | ||||
* to do direct transmission. This one gives the | * to do direct transmission. This one gives the | ||||
* best performance so far. | * best performance so far. | ||||
*/ | */ | ||||
sc->hn_sched_tx = 1; | sc->hn_sched_tx = 1; | ||||
ctx = device_get_sysctl_ctx(dev); | ctx = device_get_sysctl_ctx(dev); | ||||
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); | child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); | ||||
SYSCTL_ADD_U64(ctx, child, OID_AUTO, "lro_queued", | |||||
CTLFLAG_RW, &sc->hn_lro.lro_queued, 0, "LRO queued"); | |||||
SYSCTL_ADD_U64(ctx, child, OID_AUTO, "lro_flushed", | |||||
CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried", | |||||
CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", | |||||
CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", | |||||
"Max # of data bytes to be aggregated by LRO"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", | |||||
CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", | |||||
"Max # of ACKs to be aggregated by LRO"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", | |||||
CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, | |||||
hn_trust_hcsum_sysctl, "I", | |||||
"Trust tcp segement verification on host side, " | |||||
"when csum info is missing"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", | |||||
CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP, | |||||
hn_trust_hcsum_sysctl, "I", | |||||
"Trust udp datagram verification on host side, " | |||||
"when csum info is missing"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", | |||||
CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP, | |||||
hn_trust_hcsum_sysctl, "I", | |||||
"Trust ip packet verification on host side, " | |||||
"when csum info is missing"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_ip", | |||||
CTLFLAG_RW, &sc->hn_csum_ip, "RXCSUM IP"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_tcp", | |||||
CTLFLAG_RW, &sc->hn_csum_tcp, "RXCSUM TCP"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_udp", | |||||
CTLFLAG_RW, &sc->hn_csum_udp, "RXCSUM UDP"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_trusted", | |||||
CTLFLAG_RW, &sc->hn_csum_trusted, | |||||
"# of packets that we trust host's csum verification"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "small_pkts", | |||||
CTLFLAG_RW, &sc->hn_small_pkts, "# of small packets received"); | |||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs", | ||||
CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs"); | CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed", | ||||
CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v sending failure"); | CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v sending failure"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed", | ||||
CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failure"); | CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failure"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed", | ||||
CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapsed"); | CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapsed"); | ||||
▲ Show 20 Lines • Show All 51 Lines • ▼ Show 20 Lines | netvsc_detach(device_t dev) | ||||
hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL); | hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL); | ||||
taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task); | taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task); | ||||
taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task); | taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task); | ||||
if (sc->hn_tx_taskq != hn_tx_taskq) | if (sc->hn_tx_taskq != hn_tx_taskq) | ||||
taskqueue_free(sc->hn_tx_taskq); | taskqueue_free(sc->hn_tx_taskq); | ||||
ifmedia_removeall(&sc->hn_media); | ifmedia_removeall(&sc->hn_media); | ||||
#if defined(INET) || defined(INET6) | hn_destroy_rx_data(sc); | ||||
tcp_lro_free(&sc->hn_lro); | |||||
#endif | |||||
hn_destroy_tx_ring(sc); | hn_destroy_tx_ring(sc); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Standard shutdown entry point | * Standard shutdown entry point | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | netvsc_xmit_completion(void *context) | ||||
hn_txdesc_put(sc, txd); | hn_txdesc_put(sc, txd); | ||||
} | } | ||||
void | void | ||||
netvsc_channel_rollup(struct hv_device *device_ctx) | netvsc_channel_rollup(struct hv_device *device_ctx) | ||||
{ | { | ||||
struct hn_softc *sc = device_get_softc(device_ctx->device); | struct hn_softc *sc = device_get_softc(device_ctx->device); | ||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
struct lro_ctrl *lro = &sc->hn_lro; | struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ | ||||
struct lro_ctrl *lro = &rxr->hn_lro; | |||||
struct lro_entry *queued; | struct lro_entry *queued; | ||||
while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { | while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { | ||||
SLIST_REMOVE_HEAD(&lro->lro_active, next); | SLIST_REMOVE_HEAD(&lro->lro_active, next); | ||||
tcp_lro_flush(lro, queued); | tcp_lro_flush(lro, queued); | ||||
} | } | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 399 Lines • ▼ Show 20 Lines | |||||
* specified device | * specified device | ||||
* | * | ||||
* Note: This is no longer used as a callback | * Note: This is no longer used as a callback | ||||
*/ | */ | ||||
int | int | ||||
netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, | netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, | ||||
rndis_tcp_ip_csum_info *csum_info) | rndis_tcp_ip_csum_info *csum_info) | ||||
{ | { | ||||
hn_softc_t *sc = (hn_softc_t *)device_get_softc(device_ctx->device); | struct hn_softc *sc = device_get_softc(device_ctx->device); | ||||
struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ | |||||
struct mbuf *m_new; | struct mbuf *m_new; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
device_t dev = device_ctx->device; | |||||
int size, do_lro = 0, do_csum = 1; | int size, do_lro = 0, do_csum = 1; | ||||
if (sc == NULL) { | if (sc == NULL) { | ||||
return (0); /* TODO: KYS how can this be! */ | return (0); /* TODO: KYS how can this be! */ | ||||
} | } | ||||
ifp = sc->hn_ifp; | ifp = sc->hn_ifp; | ||||
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* Bail out if packet contains more data than configured MTU. | * Bail out if packet contains more data than configured MTU. | ||||
*/ | */ | ||||
if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) { | if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) { | ||||
return (0); | return (0); | ||||
} else if (packet->tot_data_buf_len <= MHLEN) { | } else if (packet->tot_data_buf_len <= MHLEN) { | ||||
m_new = m_gethdr(M_NOWAIT, MT_DATA); | m_new = m_gethdr(M_NOWAIT, MT_DATA); | ||||
if (m_new == NULL) | if (m_new == NULL) | ||||
return (0); | return (0); | ||||
memcpy(mtod(m_new, void *), packet->data, | memcpy(mtod(m_new, void *), packet->data, | ||||
packet->tot_data_buf_len); | packet->tot_data_buf_len); | ||||
m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len; | m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len; | ||||
sc->hn_small_pkts++; | rxr->hn_small_pkts++; | ||||
} else { | } else { | ||||
/* | /* | ||||
* Get an mbuf with a cluster. For packets 2K or less, | * Get an mbuf with a cluster. For packets 2K or less, | ||||
* get a standard 2K cluster. For anything larger, get a | * get a standard 2K cluster. For anything larger, get a | ||||
* 4K cluster. Any buffers larger than 4K can cause problems | * 4K cluster. Any buffers larger than 4K can cause problems | ||||
* if looped around to the Hyper-V TX channel, so avoid them. | * if looped around to the Hyper-V TX channel, so avoid them. | ||||
*/ | */ | ||||
size = MCLBYTES; | size = MCLBYTES; | ||||
if (packet->tot_data_buf_len > MCLBYTES) { | if (packet->tot_data_buf_len > MCLBYTES) { | ||||
/* 4096 */ | /* 4096 */ | ||||
size = MJUMPAGESIZE; | size = MJUMPAGESIZE; | ||||
} | } | ||||
m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); | m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); | ||||
if (m_new == NULL) { | if (m_new == NULL) { | ||||
device_printf(dev, "alloc mbuf failed.\n"); | if_printf(ifp, "alloc mbuf failed.\n"); | ||||
return (0); | return (0); | ||||
} | } | ||||
hv_m_append(m_new, packet->tot_data_buf_len, packet->data); | hv_m_append(m_new, packet->tot_data_buf_len, packet->data); | ||||
} | } | ||||
m_new->m_pkthdr.rcvif = ifp; | m_new->m_pkthdr.rcvif = ifp; | ||||
if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0)) | if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0)) | ||||
do_csum = 0; | do_csum = 0; | ||||
/* receive side checksum offload */ | /* receive side checksum offload */ | ||||
if (csum_info != NULL) { | if (csum_info != NULL) { | ||||
/* IP csum offload */ | /* IP csum offload */ | ||||
if (csum_info->receive.ip_csum_succeeded && do_csum) { | if (csum_info->receive.ip_csum_succeeded && do_csum) { | ||||
m_new->m_pkthdr.csum_flags |= | m_new->m_pkthdr.csum_flags |= | ||||
(CSUM_IP_CHECKED | CSUM_IP_VALID); | (CSUM_IP_CHECKED | CSUM_IP_VALID); | ||||
sc->hn_csum_ip++; | rxr->hn_csum_ip++; | ||||
} | } | ||||
/* TCP/UDP csum offload */ | /* TCP/UDP csum offload */ | ||||
if ((csum_info->receive.tcp_csum_succeeded || | if ((csum_info->receive.tcp_csum_succeeded || | ||||
csum_info->receive.udp_csum_succeeded) && do_csum) { | csum_info->receive.udp_csum_succeeded) && do_csum) { | ||||
m_new->m_pkthdr.csum_flags |= | m_new->m_pkthdr.csum_flags |= | ||||
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | ||||
m_new->m_pkthdr.csum_data = 0xffff; | m_new->m_pkthdr.csum_data = 0xffff; | ||||
if (csum_info->receive.tcp_csum_succeeded) | if (csum_info->receive.tcp_csum_succeeded) | ||||
sc->hn_csum_tcp++; | rxr->hn_csum_tcp++; | ||||
else | else | ||||
sc->hn_csum_udp++; | rxr->hn_csum_udp++; | ||||
} | } | ||||
if (csum_info->receive.ip_csum_succeeded && | if (csum_info->receive.ip_csum_succeeded && | ||||
csum_info->receive.tcp_csum_succeeded) | csum_info->receive.tcp_csum_succeeded) | ||||
do_lro = 1; | do_lro = 1; | ||||
} else { | } else { | ||||
const struct ether_header *eh; | const struct ether_header *eh; | ||||
uint16_t etype; | uint16_t etype; | ||||
Show All 15 Lines | if (csum_info != NULL) { | ||||
} | } | ||||
if (etype == ETHERTYPE_IP) { | if (etype == ETHERTYPE_IP) { | ||||
int pr; | int pr; | ||||
pr = hn_check_iplen(m_new, hoff); | pr = hn_check_iplen(m_new, hoff); | ||||
if (pr == IPPROTO_TCP) { | if (pr == IPPROTO_TCP) { | ||||
if (do_csum && | if (do_csum && | ||||
(sc->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) { | (rxr->hn_trust_hcsum & | ||||
sc->hn_csum_trusted++; | HN_TRUST_HCSUM_TCP)) { | ||||
rxr->hn_csum_trusted++; | |||||
m_new->m_pkthdr.csum_flags |= | m_new->m_pkthdr.csum_flags |= | ||||
(CSUM_IP_CHECKED | CSUM_IP_VALID | | (CSUM_IP_CHECKED | CSUM_IP_VALID | | ||||
CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | ||||
m_new->m_pkthdr.csum_data = 0xffff; | m_new->m_pkthdr.csum_data = 0xffff; | ||||
} | } | ||||
/* Rely on SW csum verification though... */ | /* Rely on SW csum verification though... */ | ||||
do_lro = 1; | do_lro = 1; | ||||
} else if (pr == IPPROTO_UDP) { | } else if (pr == IPPROTO_UDP) { | ||||
if (do_csum && | if (do_csum && | ||||
(sc->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) { | (rxr->hn_trust_hcsum & | ||||
sc->hn_csum_trusted++; | HN_TRUST_HCSUM_UDP)) { | ||||
rxr->hn_csum_trusted++; | |||||
m_new->m_pkthdr.csum_flags |= | m_new->m_pkthdr.csum_flags |= | ||||
(CSUM_IP_CHECKED | CSUM_IP_VALID | | (CSUM_IP_CHECKED | CSUM_IP_VALID | | ||||
CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); | ||||
m_new->m_pkthdr.csum_data = 0xffff; | m_new->m_pkthdr.csum_data = 0xffff; | ||||
} | } | ||||
} else if (pr != IPPROTO_DONE && do_csum && | } else if (pr != IPPROTO_DONE && do_csum && | ||||
(sc->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { | (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { | ||||
sc->hn_csum_trusted++; | rxr->hn_csum_trusted++; | ||||
m_new->m_pkthdr.csum_flags |= | m_new->m_pkthdr.csum_flags |= | ||||
(CSUM_IP_CHECKED | CSUM_IP_VALID); | (CSUM_IP_CHECKED | CSUM_IP_VALID); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
skip: | skip: | ||||
if ((packet->vlan_tci != 0) && | if ((packet->vlan_tci != 0) && | ||||
(ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { | (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { | ||||
m_new->m_pkthdr.ether_vtag = packet->vlan_tci; | m_new->m_pkthdr.ether_vtag = packet->vlan_tci; | ||||
m_new->m_flags |= M_VLANTAG; | m_new->m_flags |= M_VLANTAG; | ||||
} | } | ||||
/* | /* | ||||
* Note: Moved RX completion back to hv_nv_on_receive() so all | * Note: Moved RX completion back to hv_nv_on_receive() so all | ||||
* messages (not just data messages) will trigger a response. | * messages (not just data messages) will trigger a response. | ||||
*/ | */ | ||||
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); | if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); | ||||
if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { | if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { | ||||
#if defined(INET) || defined(INET6) | #if defined(INET) || defined(INET6) | ||||
struct lro_ctrl *lro = &sc->hn_lro; | struct lro_ctrl *lro = &rxr->hn_lro; | ||||
if (lro->lro_cnt) { | if (lro->lro_cnt) { | ||||
sc->hn_lro_tried++; | rxr->hn_lro_tried++; | ||||
if (tcp_lro_rx(lro, m_new, 0) == 0) { | if (tcp_lro_rx(lro, m_new, 0) == 0) { | ||||
/* DONE! */ | /* DONE! */ | ||||
return 0; | return 0; | ||||
} | } | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | case SIOCSIFMTU: | ||||
/* Obtain and record requested MTU */ | /* Obtain and record requested MTU */ | ||||
ifp->if_mtu = ifr->ifr_mtu; | ifp->if_mtu = ifr->ifr_mtu; | ||||
/* | /* | ||||
* Make sure that LRO aggregation length limit is still | * Make sure that LRO aggregation length limit is still | ||||
* valid, after the MTU change. | * valid, after the MTU change. | ||||
*/ | */ | ||||
if (sc->hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) | NV_LOCK(sc); | ||||
sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_MIN(ifp); | if (sc->hn_rx_ring[0].hn_lro.lro_length_lim < | ||||
HN_LRO_LENLIM_MIN(ifp)) { | |||||
int i; | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
sc->hn_rx_ring[i].hn_lro.lro_length_lim = | |||||
HN_LRO_LENLIM_MIN(ifp); | |||||
} | |||||
} | |||||
NV_UNLOCK(sc); | |||||
do { | do { | ||||
NV_LOCK(sc); | NV_LOCK(sc); | ||||
if (!sc->temp_unusable) { | if (!sc->temp_unusable) { | ||||
sc->temp_unusable = TRUE; | sc->temp_unusable = TRUE; | ||||
retry_cnt = -1; | retry_cnt = -1; | ||||
} | } | ||||
NV_UNLOCK(sc); | NV_UNLOCK(sc); | ||||
if (retry_cnt > 0) { | if (retry_cnt > 0) { | ||||
▲ Show 20 Lines • Show All 294 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
#endif | #endif | ||||
static int | static int | ||||
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) | hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct hn_softc *sc = arg1; | struct hn_softc *sc = arg1; | ||||
unsigned int lenlim; | unsigned int lenlim; | ||||
int error; | int error, i; | ||||
lenlim = sc->hn_lro.lro_length_lim; | lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim; | ||||
error = sysctl_handle_int(oidp, &lenlim, 0, req); | error = sysctl_handle_int(oidp, &lenlim, 0, req); | ||||
if (error || req->newptr == NULL) | if (error || req->newptr == NULL) | ||||
return error; | return error; | ||||
if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || | if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || | ||||
lenlim > TCP_LRO_LENGTH_MAX) | lenlim > TCP_LRO_LENGTH_MAX) | ||||
return EINVAL; | return EINVAL; | ||||
sc->hn_lro.lro_length_lim = lenlim; | NV_LOCK(sc); | ||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) | |||||
sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim; | |||||
NV_UNLOCK(sc); | |||||
return 0; | return 0; | ||||
} | } | ||||
static int | static int | ||||
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) | hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct hn_softc *sc = arg1; | struct hn_softc *sc = arg1; | ||||
int ackcnt, error; | int ackcnt, error, i; | ||||
/* | /* | ||||
* lro_ackcnt_lim is append count limit, | * lro_ackcnt_lim is append count limit, | ||||
* +1 to turn it into aggregation limit. | * +1 to turn it into aggregation limit. | ||||
*/ | */ | ||||
ackcnt = sc->hn_lro.lro_ackcnt_lim + 1; | ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1; | ||||
error = sysctl_handle_int(oidp, &ackcnt, 0, req); | error = sysctl_handle_int(oidp, &ackcnt, 0, req); | ||||
if (error || req->newptr == NULL) | if (error || req->newptr == NULL) | ||||
return error; | return error; | ||||
if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) | if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) | ||||
return EINVAL; | return EINVAL; | ||||
/* | /* | ||||
* Convert aggregation limit back to append | * Convert aggregation limit back to append | ||||
* count limit. | * count limit. | ||||
*/ | */ | ||||
sc->hn_lro.lro_ackcnt_lim = ackcnt - 1; | --ackcnt; | ||||
NV_LOCK(sc); | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) | |||||
sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt; | |||||
NV_UNLOCK(sc); | |||||
return 0; | return 0; | ||||
} | } | ||||
static int | static int | ||||
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) | hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct hn_softc *sc = arg1; | struct hn_softc *sc = arg1; | ||||
int hcsum = arg2; | int hcsum = arg2; | ||||
int on, error; | int on, error, i; | ||||
on = 0; | on = 0; | ||||
if (sc->hn_trust_hcsum & hcsum) | if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum) | ||||
on = 1; | on = 1; | ||||
error = sysctl_handle_int(oidp, &on, 0, req); | error = sysctl_handle_int(oidp, &on, 0, req); | ||||
if (error || req->newptr == NULL) | if (error || req->newptr == NULL) | ||||
return error; | return error; | ||||
NV_LOCK(sc); | NV_LOCK(sc); | ||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; | |||||
if (on) | if (on) | ||||
sc->hn_trust_hcsum |= hcsum; | rxr->hn_trust_hcsum |= hcsum; | ||||
else | else | ||||
sc->hn_trust_hcsum &= ~hcsum; | rxr->hn_trust_hcsum &= ~hcsum; | ||||
} | |||||
NV_UNLOCK(sc); | NV_UNLOCK(sc); | ||||
return 0; | return 0; | ||||
} | } | ||||
static int | static int | ||||
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) | hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct hn_softc *sc = arg1; | struct hn_softc *sc = arg1; | ||||
int chimney_size, error; | int chimney_size, error; | ||||
chimney_size = sc->hn_tx_chimney_size; | chimney_size = sc->hn_tx_chimney_size; | ||||
error = sysctl_handle_int(oidp, &chimney_size, 0, req); | error = sysctl_handle_int(oidp, &chimney_size, 0, req); | ||||
if (error || req->newptr == NULL) | if (error || req->newptr == NULL) | ||||
return error; | return error; | ||||
if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0) | if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0) | ||||
return EINVAL; | return EINVAL; | ||||
if (sc->hn_tx_chimney_size != chimney_size) | if (sc->hn_tx_chimney_size != chimney_size) | ||||
sc->hn_tx_chimney_size = chimney_size; | sc->hn_tx_chimney_size = chimney_size; | ||||
return 0; | return 0; | ||||
} | } | ||||
static int | static int | ||||
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int ofs = arg2, i, error; | |||||
struct hn_rx_ring *rxr; | |||||
u_long stat; | |||||
stat = 0; | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
rxr = &sc->hn_rx_ring[i]; | |||||
stat += *((u_long *)((uint8_t *)rxr + ofs)); | |||||
} | |||||
error = sysctl_handle_long(oidp, &stat, 0, req); | |||||
if (error || req->newptr == NULL) | |||||
return error; | |||||
/* Zero out this stat. */ | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
rxr = &sc->hn_rx_ring[i]; | |||||
*((u_long *)((uint8_t *)rxr + ofs)) = 0; | |||||
} | |||||
return 0; | |||||
} | |||||
static int | |||||
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct hn_softc *sc = arg1; | |||||
int ofs = arg2, i, error; | |||||
struct hn_rx_ring *rxr; | |||||
uint64_t stat; | |||||
stat = 0; | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
rxr = &sc->hn_rx_ring[i]; | |||||
stat += *((uint64_t *)((uint8_t *)rxr + ofs)); | |||||
} | |||||
error = sysctl_handle_64(oidp, &stat, 0, req); | |||||
if (error || req->newptr == NULL) | |||||
return error; | |||||
/* Zero out this stat. */ | |||||
for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { | |||||
rxr = &sc->hn_rx_ring[i]; | |||||
*((uint64_t *)((uint8_t *)rxr + ofs)) = 0; | |||||
} | |||||
return 0; | |||||
} | |||||
static int | |||||
hn_check_iplen(const struct mbuf *m, int hoff) | hn_check_iplen(const struct mbuf *m, int hoff) | ||||
{ | { | ||||
const struct ip *ip; | const struct ip *ip; | ||||
int len, iphlen, iplen; | int len, iphlen, iplen; | ||||
const struct tcphdr *th; | const struct tcphdr *th; | ||||
int thoff; /* TCP data offset */ | int thoff; /* TCP data offset */ | ||||
len = hoff + sizeof(struct ip); | len = hoff + sizeof(struct ip); | ||||
▲ Show 20 Lines • Show All 68 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
bus_addr_t *paddr = arg; | bus_addr_t *paddr = arg; | ||||
if (error) | if (error) | ||||
return; | return; | ||||
KASSERT(nseg == 1, ("too many segments %d!", nseg)); | KASSERT(nseg == 1, ("too many segments %d!", nseg)); | ||||
*paddr = segs->ds_addr; | *paddr = segs->ds_addr; | ||||
} | |||||
/*
 * Allocate and initialize the RX ring(s): seed per-ring host-checksum
 * trust bits from the global tunables, set up per-ring LRO state, and
 * register the RX-related sysctl nodes.  Called once from attach, after
 * the ifnet has been allocated (the LRO code needs sc->hn_ifp).
 */
static void
hn_create_rx_data(struct hn_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	int lroent_cnt;
#endif
#endif
	int i;

	sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */
	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	/* Honor the tunable, but never go below the stack's default. */
	lroent_cnt = hn_lro_entry_count;
	if (lroent_cnt < TCP_LRO_ENTRIES)
		lroent_cnt = TCP_LRO_ENTRIES;
	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif	/* INET || INET6 */

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		/* Seed per-ring checksum trust from the global tunables. */
		if (hn_trust_hosttcp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
		if (hn_trust_hostudp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
		if (hn_trust_hostip)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;

		/*
		 * Initialize LRO.
		 */
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
		tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0);
#else
		/* Older stacks: no init_args(); set the ifp by hand. */
		tcp_lro_init(&rxr->hn_lro);
		rxr->hn_lro.ifp = sc->hn_ifp;
#endif
		rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
		rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif	/* INET || INET6 */
	}

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/*
	 * Statistic nodes go through hn_rx_stat_{u64,ulong}_sysctl with
	 * the field offset as arg2: reads sum over all rings, writes
	 * reset the counter on every ring.
	 */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
	    hn_rx_stat_u64_sysctl, "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
	    hn_rx_stat_u64_sysctl, "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro_tried),
	    hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
	    "Max # of data bytes to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
	    "Max # of ACKs to be aggregated by LRO");
	/* Trust knobs pass the HN_TRUST_HCSUM_* bit as arg2. */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust tcp segement verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust udp datagram verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust ip packet verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_ip),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_tcp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_udp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_trusted),
	    hn_rx_stat_ulong_sysctl, "LU",
	    "# of packets that we trust host's csum verification");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_small_pkts),
	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
}
/*
 * Tear down the RX ring state created by hn_create_rx_data():
 * release per-ring LRO resources, then free the ring array.
 * Safe to call when attach failed before the rings were created.
 */
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
	int i;
#endif

	/* Nothing to do if the rings were never allocated. */
	if (sc->hn_rx_ring_cnt == 0)
		return;

#if defined(INET) || defined(INET6)
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif

	free(sc->hn_rx_ring, M_NETVSC);
	sc->hn_rx_ring = NULL;

	sc->hn_rx_ring_cnt = 0;
}
static int | static int | ||||
hn_create_tx_ring(struct hn_softc *sc) | hn_create_tx_ring(struct hn_softc *sc) | ||||
{ | { | ||||
bus_dma_tag_t parent_dtag; | bus_dma_tag_t parent_dtag; | ||||
int error, i; | int error, i; | ||||
▲ Show 20 Lines • Show All 191 Lines • Show Last 20 Lines |