Index: sys/dev/hyperv/netvsc/hn_nvs.h
===================================================================
--- sys/dev/hyperv/netvsc/hn_nvs.h
+++ sys/dev/hyperv/netvsc/hn_nvs.h
@@ -100,6 +100,7 @@
 int		hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
 		    struct hn_nvs_sendctx *sndc, struct vmbus_gpa *gpa,
 		    int gpa_cnt);
+void		hn_nvs_set_vf(struct hn_softc *sc, bool vf);
 
 extern struct hn_nvs_sendctx	hn_nvs_sendctx_none;
 
Index: sys/dev/hyperv/netvsc/hn_nvs.c
===================================================================
--- sys/dev/hyperv/netvsc/hn_nvs.c
+++ sys/dev/hyperv/netvsc/hn_nvs.c
@@ -500,6 +500,8 @@
 	conf.nvs_type = HN_NVS_TYPE_NDIS_CONF;
 	conf.nvs_mtu = mtu;
 	conf.nvs_caps = HN_NVS_NDIS_CONF_VLAN;
+	if (sc->hn_nvs_ver >= HN_NVS_VERSION_5)
+		conf.nvs_caps |= HN_NVS_NDIS_CONF_SRIOV;
 
 	/* NOTE: No response. */
 	error = hn_nvs_req_send(sc, &conf, sizeof(conf));
@@ -719,3 +721,16 @@
 	return hn_nvs_send_rndis_sglist(chan, HN_NVS_RNDIS_MTYPE_CTRL,
 	    sndc, gpa, gpa_cnt);
 }
+
+void
+hn_nvs_set_vf(struct hn_softc *sc, bool vf)
+{
+	struct hn_nvs_set_datapath dp;
+
+	memset(&dp, 0, sizeof(dp));
+	dp.nvs_type = HN_NVS_TYPE_SET_DATAPATH;
+	dp.nvs_active_path = vf ? HN_NVS_DATAPATH_VF :
+	    HN_NVS_DATAPATH_SYNTHETIC;
+
+	hn_nvs_req_send(sc, &dp, sizeof(dp));
+}
Index: sys/dev/hyperv/netvsc/if_hn.c
===================================================================
--- sys/dev/hyperv/netvsc/if_hn.c
+++ sys/dev/hyperv/netvsc/if_hn.c
@@ -77,6 +77,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -84,6 +85,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -687,13 +689,17 @@
 static int
 hn_set_rxfilter(struct hn_softc *sc, uint32_t filter)
 {
+	bool vf_enabled = sc->hn_flags & HN_FLAG_VF;
 	int error = 0;
 
 	HN_LOCK_ASSERT(sc);
 
+	if (vf_enabled)
+		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+
 	if (sc->hn_rx_filter != filter) {
 		error = hn_rndis_set_rxfilter(sc, filter);
-		if (!error)
+		if (!error && !vf_enabled)
 			sc->hn_rx_filter = filter;
 	}
 	return (error);
 }
@@ -896,6 +902,123 @@
 	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
 }
 
+struct hn_update_vf {
+	struct hn_rx_ring	*rxr;
+	struct ifnet		*vf;
+};
+
+static void
+hn_update_vf_task(void *arg, int pending __unused)
+{
+	struct hn_update_vf *uv = arg;
+	struct hn_rx_ring *rxr = uv->rxr;
+	struct ifnet *vf = uv->vf;
+
+	rxr->hn_vf = vf;
+}
+
+static void
+hn_update_vf(struct hn_softc *sc, struct ifnet *vf)
+{
+	struct hn_rx_ring *rxr;
+	struct hn_update_vf uv;
+	struct task task;
+	int i;
+
+	TASK_INIT(&task, 0, hn_update_vf_task, &uv);
+
+	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
+		rxr = &sc->hn_rx_ring[i];
+
+		if (i < sc->hn_rx_ring_inuse) {
+			uv.rxr = rxr;
+			uv.vf = vf;
+			vmbus_chan_run_task(rxr->hn_chan, &task);
+		} else {
+			rxr->hn_vf = vf;
+		}
+	}
+}
+
+static void
+hn_set_vf(struct hn_softc *sc, struct ifnet *ifp, bool vf)
+{
+	struct ifnet *hn_ifp = sc->hn_ifp;
+
+	if (vf) {
+		sc->hn_flags |= HN_FLAG_VF;
+		hn_set_rxfilter(sc, NDIS_PACKET_TYPE_PROMISCUOUS);
+	} else {
+		sc->hn_flags &= ~HN_FLAG_VF;
+		hn_rxfilter_config(sc);
+	}
+
+	hn_nvs_set_vf(sc, vf);
+
+	hn_update_vf(sc, vf ? ifp : NULL);
+
+	if (bootverbose)
+		if_printf(hn_ifp, "Data path is switched %s %s\n",
+		    vf ? "to" : "from", if_name(ifp));
+}
+
+static void
+hn_ifnet_event(void *arg, struct ifnet *ifp, int event)
+{
+	struct hn_softc *sc = arg;
+	struct ifnet *hn_ifp;
+
+	HN_LOCK(sc);
+
+	hn_ifp = sc->hn_ifp;
+
+	if (ifp == hn_ifp)
+		goto out;
+
+	if (bcmp(IF_LLADDR(ifp), IF_LLADDR(hn_ifp), ETHER_ADDR_LEN) != 0)
+		goto out;
+
+	switch (event) {
+	case IFNET_EVENT_UP:
+		hn_set_vf(sc, ifp, true);
+		break;
+
+	case IFNET_EVENT_DOWN:
+		hn_set_vf(sc, ifp, false);
+		break;
+
+	default:
+		break;
+	}
+
+out:
+	HN_UNLOCK(sc);
+}
+
+static void
+hn_ifaddr_event(void *arg, struct ifnet *ifp)
+{
+	struct hn_softc *sc = arg;
+	struct ifnet *hn_ifp;
+
+	HN_LOCK(sc);
+
+	hn_ifp = sc->hn_ifp;
+
+	if (ifp == hn_ifp)
+		goto out;
+
+	if (bcmp(IF_LLADDR(ifp), IF_LLADDR(hn_ifp), ETHER_ADDR_LEN) != 0)
+		goto out;
+
+	if ((ifp->if_flags & IFF_UP) && !(sc->hn_flags & HN_FLAG_VF))
+		hn_set_vf(sc, ifp, true);
+	else if (!(ifp->if_flags & IFF_UP) && (sc->hn_flags & HN_FLAG_VF))
+		hn_set_vf(sc, ifp, false);
+out:
+	HN_UNLOCK(sc);
+}
+
 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
 static const struct hyperv_guid g_net_vsc_device_type = {
 	.hv_guid = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
@@ -1221,6 +1344,12 @@
 	sc->hn_mgmt_taskq = sc->hn_mgmt_taskq0;
 	hn_update_link_status(sc);
 
+	sc->ifnet_event_hndl_tag = EVENTHANDLER_REGISTER(ifnet_event,
+	    hn_ifnet_event, sc, EVENTHANDLER_PRI_ANY);
+
+	sc->ifaddr_event_hndl_tag = EVENTHANDLER_REGISTER(ifaddr_event,
+	    hn_ifaddr_event, sc, EVENTHANDLER_PRI_ANY);
+
 	return (0);
 failed:
 	if (sc->hn_flags & HN_FLAG_SYNTH_ATTACHED)
@@ -1235,6 +1364,9 @@
 	struct hn_softc *sc = device_get_softc(dev);
 	struct ifnet *ifp = sc->hn_ifp;
 
+	EVENTHANDLER_DEREGISTER(ifaddr_event, sc->ifaddr_event_hndl_tag);
+	EVENTHANDLER_DEREGISTER(ifnet_event, sc->ifnet_event_hndl_tag);
+
 	if (sc->hn_xact != NULL && vmbus_chan_is_revoked(sc->hn_prichan)) {
 		/*
 		 * In case that the vmbus missed the orphan handler
@@ -2125,6 +2257,7 @@
     const struct hn_rxinfo *info)
 {
 	struct ifnet *ifp = rxr->hn_ifp;
+	struct ifnet *hn_vf = rxr->hn_vf;
 	struct mbuf *m_new;
 	int size, do_lro = 0, do_csum = 1;
 	int hash_type;
@@ -2159,7 +2292,8 @@
 		hv_m_append(m_new, dlen, data);
 	}
 
-	m_new->m_pkthdr.rcvif = ifp;
+
+	m_new->m_pkthdr.rcvif = hn_vf ? hn_vf : ifp;
 
 	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
 		do_csum = 0;
@@ -2307,6 +2441,9 @@
 	}
 	M_HASHTYPE_SET(m_new, hash_type);
 
+	if (hn_vf != NULL)
+		do_lro = 0;
+
 	/*
 	 * Note: Moved RX completion back to hv_nv_on_receive() so all
 	 * messages (not just data messages) will trigger a response.
@@ -2329,6 +2466,10 @@
 #endif
 	}
 
+	/* Inject the packet into the VF interface. */
+	if (hn_vf != NULL)
+		ifp = hn_vf;
+
 	/* We're not holding the lock here, so don't release it */
 	(*ifp->if_input)(ifp, m_new);
Index: sys/dev/hyperv/netvsc/if_hnreg.h
===================================================================
--- sys/dev/hyperv/netvsc/if_hnreg.h
+++ sys/dev/hyperv/netvsc/if_hnreg.h
@@ -133,6 +133,17 @@
 } __packed;
 CTASSERT(sizeof(struct hn_nvs_ndis_init) >= HN_NVS_REQSIZE_MIN);
 
+#define HN_NVS_DATAPATH_SYNTHETIC	0
+#define HN_NVS_DATAPATH_VF		1
+
+/* No response */
+struct hn_nvs_set_datapath {
+	uint32_t	nvs_type;	/* HN_NVS_TYPE_SET_DATAPATH */
+	uint32_t	nvs_active_path;/* HN_NVS_DATAPATH_* */
+	uint32_t	nvs_rsvd[6];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_set_datapath) >= HN_NVS_REQSIZE_MIN);
+
 struct hn_nvs_rxbuf_conn {
 	uint32_t	nvs_type;	/* HN_NVS_TYPE_RXBUF_CONN */
 	uint32_t	nvs_gpadl;	/* RXBUF vmbus GPADL */
Index: sys/dev/hyperv/netvsc/if_hnvar.h
===================================================================
--- sys/dev/hyperv/netvsc/if_hnvar.h
+++ sys/dev/hyperv/netvsc/if_hnvar.h
@@ -59,6 +59,7 @@
 
 struct hn_rx_ring {
 	struct ifnet	*hn_ifp;
+	struct ifnet	*hn_vf;		/* SR-IOV VF */
 	struct hn_tx_ring *hn_txr;
 	void		*hn_pktbuf;
 	int		hn_pktbuf_len;
@@ -234,6 +235,9 @@
 	int			hn_rss_ind_size;
 	uint32_t		hn_rss_hash;	/* NDIS_HASH_ */
 	struct ndis_rssprm_toeplitz hn_rss;
+
+	eventhandler_tag	ifaddr_event_hndl_tag;
+	eventhandler_tag	ifnet_event_hndl_tag;
 };
 
 #define HN_FLAG_RXBUF_CONNECTED		0x0001
@@ -244,6 +248,7 @@
 #define HN_FLAG_NO_SLEEPING		0x0020
 #define HN_FLAG_RXBUF_REF		0x0040
 #define HN_FLAG_CHIM_REF		0x0080
+#define HN_FLAG_VF			0x0100
 
 #define HN_FLAG_ERRORS			(HN_FLAG_RXBUF_REF | HN_FLAG_CHIM_REF)