Index: sys/dev/ixl/if_ixl.c =================================================================== --- sys/dev/ixl/if_ixl.c +++ sys/dev/ixl/if_ixl.c @@ -84,7 +84,7 @@ static int ixl_detach(device_t); static int ixl_shutdown(device_t); static int ixl_get_hw_capabilities(struct ixl_pf *); -static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); +static void ixl_cap_txcsum_tso(struct ixl_ifx *, struct ifnet *, int); static int ixl_ioctl(struct ifnet *, u_long, caddr_t); static void ixl_init(void *); static void ixl_init_locked(struct ixl_pf *); @@ -95,8 +95,8 @@ static int ixl_allocate_pci_resources(struct ixl_pf *); static u16 ixl_get_bus_info(struct i40e_hw *, device_t); static int ixl_setup_stations(struct ixl_pf *); -static int ixl_setup_vsi(struct ixl_vsi *); -static int ixl_initialize_vsi(struct ixl_vsi *); +static int ixl_setup_vsi(struct ixl_ifx *); +static int ixl_initialize_vsi(struct ixl_ifx *); static int ixl_assign_vsi_msix(struct ixl_pf *); static int ixl_assign_vsi_legacy(struct ixl_pf *); static int ixl_init_msix(struct ixl_pf *); @@ -105,17 +105,17 @@ static void ixl_configure_legacy(struct ixl_pf *); static void ixl_free_pci_resources(struct ixl_pf *); static void ixl_local_timer(void *); -static int ixl_setup_interface(device_t, struct ixl_vsi *); +static int ixl_setup_interface(device_t, struct ixl_ifx *); static bool ixl_config_link(struct i40e_hw *); -static void ixl_config_rss(struct ixl_vsi *); +static void ixl_config_rss(struct ixl_ifx *); static void ixl_set_queue_rx_itr(struct ixl_queue *); static void ixl_set_queue_tx_itr(struct ixl_queue *); static int ixl_set_advertised_speeds(struct ixl_pf *, int); -static void ixl_enable_rings(struct ixl_vsi *); -static void ixl_disable_rings(struct ixl_vsi *); -static void ixl_enable_intr(struct ixl_vsi *); -static void ixl_disable_intr(struct ixl_vsi *); +static int ixl_enable_rings(struct ixl_vsi *); +static int ixl_disable_rings(struct ixl_vsi *); +static void ixl_enable_intr(struct 
ixl_ifx *); +static void ixl_disable_intr(struct ixl_ifx *); static void ixl_enable_adminq(struct i40e_hw *); static void ixl_disable_adminq(struct i40e_hw *); @@ -124,21 +124,23 @@ static void ixl_enable_legacy(struct i40e_hw *); static void ixl_disable_legacy(struct i40e_hw *); -static void ixl_set_promisc(struct ixl_vsi *); -static void ixl_add_multi(struct ixl_vsi *); -static void ixl_del_multi(struct ixl_vsi *); +static void ixl_set_promisc(struct ixl_ifx *); +static void ixl_add_multi(struct ixl_ifx *); +static void ixl_del_multi(struct ixl_ifx *); static void ixl_register_vlan(void *, struct ifnet *, u16); static void ixl_unregister_vlan(void *, struct ifnet *, u16); static void ixl_setup_vlan_filters(struct ixl_vsi *); -static void ixl_init_filters(struct ixl_vsi *); +static void ixl_init_filters(struct ixl_ifx *); static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_add_hw_filters(struct ixl_vsi *, int, int); static void ixl_del_hw_filters(struct ixl_vsi *, int); static struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, u8 *, s16); -static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); +static void ixl_add_mc_filter(struct ixl_ifx *, u8 *); +static void ixl_free_mac_filters(struct ixl_vsi *vsi); + /* Sysctl debug interface */ static int ixl_debug_info(SYSCTL_HANDLER_ARGS); @@ -168,6 +170,7 @@ struct i40e_eth_stats *); static void ixl_update_stats_counters(struct ixl_pf *); static void ixl_update_eth_stats(struct ixl_vsi *); +static void ixl_update_ifx_stats(struct ixl_ifx *); static void ixl_pf_reset_stats(struct ixl_pf *); static void ixl_vsi_reset_stats(struct ixl_vsi *); static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, @@ -281,6 +284,9 @@ #endif +static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + static char *ixl_fc_string[6] = { "None", "Rx", @@ -363,7 +369,7 @@ { struct ixl_pf *pf; struct i40e_hw *hw; - 
struct ixl_vsi *vsi; + struct ixl_ifx *ifx; u16 bus; int error = 0; @@ -378,8 +384,8 @@ ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ - vsi = &pf->vsi; - vsi->dev = pf->dev; + ifx = &pf->ifx; + ifx->dev = pf->dev; /* Core Lock Init*/ IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); @@ -584,7 +590,7 @@ } /* Set up host memory cache */ - error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0); + error = i40e_init_lan_hmc(hw, ifx->vsi.num_queues, ifx->vsi.num_queues, 0, 0); if (error) { device_printf(dev, "init_lan_hmc failed: %d\n", error); goto err_get_cap; @@ -616,7 +622,7 @@ } /* Initialize mac filter list for VSI */ - SLIST_INIT(&vsi->ftl); + SLIST_INIT(&ifx->vsi.ftl); /* Set up interrupt routing here */ if (pf->msix > 1) @@ -634,10 +640,10 @@ } /* Determine link state */ - vsi->link_up = ixl_config_link(hw); + ifx->link_up = ixl_config_link(hw); /* Report if Unqualified modules are found */ - if ((vsi->link_up == FALSE) && + if ((ifx->link_up == FALSE) && (pf->hw.phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(pf->hw.phy.link_info.an_info & @@ -646,7 +652,7 @@ "an unqualified module was detected\n"); /* Setup OS specific network interface */ - if (ixl_setup_interface(dev, vsi) != 0) { + if (ixl_setup_interface(dev, ifx) != 0) { device_printf(dev, "interface setup failed!\n"); error = EIO; goto err_late; @@ -669,25 +675,25 @@ } /* Register for VLAN events */ - vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); - vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); + ifx->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixl_register_vlan, ifx, EVENTHANDLER_PRI_FIRST); + ifx->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixl_unregister_vlan, ifx, EVENTHANDLER_PRI_FIRST); INIT_DEBUGOUT("ixl_attach: end"); return (0); err_late: - if (vsi->ifp != NULL) - 
if_free(vsi->ifp); + if (ifx->ifp != NULL) + if_free(ifx->ifp); err_mac_hmc: i40e_shutdown_lan_hmc(hw); err_get_cap: i40e_shutdown_adminq(hw); err_out: ixl_free_pci_resources(pf); - ixl_free_vsi(vsi); + ixl_free_ifx(ifx); IXL_PF_LOCK_DESTROY(pf); return (error); } @@ -707,14 +713,14 @@ { struct ixl_pf *pf = device_get_softc(dev); struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; i40e_status status; INIT_DEBUGOUT("ixl_detach: begin"); /* Make sure VLANS are not using driver */ - if (vsi->ifp->if_vlantrunk != NULL) { + if (ifx->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } @@ -723,7 +729,7 @@ ixl_stop(pf); IXL_PF_UNLOCK(pf); - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { if (que->tq) { taskqueue_drain(que->tq, &que->task); taskqueue_drain(que->tq, &que->tx_task); @@ -744,19 +750,19 @@ "Shutdown Admin queue failed with code %d\n", status); /* Unregister VLAN events */ - if (vsi->vlan_attach != NULL) - EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); - if (vsi->vlan_detach != NULL) - EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); + if (ifx->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, ifx->vlan_attach); + if (ifx->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, ifx->vlan_detach); - ether_ifdetach(vsi->ifp); + ether_ifdetach(ifx->ifp); callout_drain(&pf->timer); ixl_free_pci_resources(pf); bus_generic_detach(dev); - if_free(vsi->ifp); - ixl_free_vsi(vsi); + if_free(ifx->ifp); + ixl_free_ifx(ifx); IXL_PF_LOCK_DESTROY(pf); return (0); } @@ -837,9 +843,9 @@ } static void -ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) +ixl_cap_txcsum_tso(struct ixl_ifx *ifx, struct ifnet *ifp, int mask) { - device_t dev = vsi->dev; + device_t dev = ifx->dev; /* Enable/disable 
TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) @@ -847,14 +853,14 @@ if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ - if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + if (ifx->flags & IXL_FLAGS_KEEP_TSO4) { + ifx->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + ifx->flags &= ~IXL_FLAGS_KEEP_TSO4; device_printf(dev, "TSO4 requires txcsum, enabling both...\n"); } @@ -867,7 +873,7 @@ } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { - vsi->flags |= IXL_FLAGS_KEEP_TSO4; + ifx->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); device_printf(dev, "TSO4 requires txcsum, disabling both...\n"); @@ -880,13 +886,13 @@ && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; - if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + if (ifx->flags & IXL_FLAGS_KEEP_TSO6) { + ifx->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + ifx->flags &= ~IXL_FLAGS_KEEP_TSO6; device_printf(dev, "TSO6 requires txcsum6, enabling both...\n"); } @@ -899,7 +905,7 @@ } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { - vsi->flags |= IXL_FLAGS_KEEP_TSO6; + ifx->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); device_printf(dev, "TSO6 requires txcsum6, disabling both...\n"); @@ -920,8 +926,8 @@ static int ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = (struct ixl_pf 
*)vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct ixl_pf *pf = ifx->back; struct ifreq *ifr = (struct ifreq *) data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; @@ -965,7 +971,7 @@ } else { IXL_PF_LOCK(pf); ifp->if_mtu = ifr->ifr_mtu; - vsi->max_frame_size = + ifx->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; ixl_init_locked(pf); @@ -979,7 +985,7 @@ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ pf->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { - ixl_set_promisc(vsi); + ixl_set_promisc(ifx); } } else ixl_init_locked(pf); @@ -993,9 +999,9 @@ IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); - ixl_disable_intr(vsi); - ixl_add_multi(vsi); - ixl_enable_intr(vsi); + ixl_disable_intr(ifx); + ixl_add_multi(ifx); + ixl_enable_intr(ifx); IXL_PF_UNLOCK(pf); } break; @@ -1003,23 +1009,23 @@ IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); - ixl_disable_intr(vsi); - ixl_del_multi(vsi); - ixl_enable_intr(vsi); + ixl_disable_intr(ifx); + ixl_del_multi(ifx); + ixl_enable_intr(ifx); IXL_PF_UNLOCK(pf); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); - error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); + error = ifmedia_ioctl(ifp, ifr, &ifx->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); - ixl_cap_txcsum_tso(vsi, ifp, mask); + ixl_cap_txcsum_tso(ifx, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; @@ -1068,8 +1074,8 @@ ixl_init_locked(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = &pf->ifx; + struct ifnet *ifp = ifx->ifp; device_t dev = pf->dev; struct i40e_filter_control_settings filter; u8 
tmpaddr[ETHER_ADDR_LEN]; @@ -1080,7 +1086,7 @@ ixl_stop(pf); /* Get the latest mac address... User might use a LAA */ - bcopy(IF_LLADDR(vsi->ifp), tmpaddr, + bcopy(IF_LLADDR(ifx->ifp), tmpaddr, I40E_ETH_LENGTH_OF_ADDRESS); if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && i40e_validate_mac_addr(tmpaddr)) { @@ -1116,24 +1122,27 @@ device_printf(dev, "set_filter_control() failed\n"); /* Set up RSS */ - ixl_config_rss(vsi); + ixl_config_rss(ifx); /* Setup the VSI */ - ixl_setup_vsi(vsi); + if (ixl_setup_vsi(ifx)) { + device_printf(dev,"ixl_setup_vsi() failed!\n"); + return; + } /* ** Prepare the rings, hmc contexts, etc... */ - if (ixl_initialize_vsi(vsi)) { + if (ixl_initialize_vsi(ifx)) { device_printf(dev, "initialize vsi failed!!\n"); return; } /* Add protocol filters to list */ - ixl_init_filters(vsi); + ixl_init_filters(ifx); /* Setup vlan's if needed */ - ixl_setup_vlan_filters(vsi); + ixl_setup_vlan_filters(&ifx->vsi); /* Start the local timer */ callout_reset(&pf->timer, hz, ixl_local_timer, pf); @@ -1145,20 +1154,21 @@ } else ixl_configure_legacy(pf); - ixl_enable_rings(vsi); + ixl_enable_rings(&ifx->vsi); + + i40e_aq_set_default_vsi(hw, ifx->vsi.seid, NULL); - i40e_aq_set_default_vsi(hw, vsi->seid, NULL); /* Set MTU in hardware*/ - int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, + int aq_error = i40e_aq_set_mac_config(hw, ifx->max_frame_size, TRUE, 0, NULL); if (aq_error) - device_printf(vsi->dev, + device_printf(ifx->dev, "aq_set_mac_config in init error, code %d\n", aq_error); /* And now turn on interrupts */ - ixl_enable_intr(vsi); + ixl_enable_intr(ifx); /* Now inform the stack we're ready */ ifp->if_drv_flags |= IFF_DRV_RUNNING; @@ -1187,10 +1197,10 @@ ixl_handle_que(void *context, int pending) { struct ixl_queue *que = context; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp 
= ifx->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { @@ -1222,9 +1232,9 @@ { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; + struct ifnet *ifp = ifx->ifp; struct tx_ring *txr = &que->txr; u32 reg, icr0, mask; bool more_tx, more_rx; @@ -1252,7 +1262,7 @@ IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); - if (!drbr_empty(vsi->ifp, txr->br)) + if (!drbr_empty(ifx->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); @@ -1284,13 +1294,13 @@ ixl_msix_que(void *arg) { struct ixl_queue *que = arg; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Protect against spurious interrupts */ - if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) + if (!(ifx->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; ++que->irqs; @@ -1304,7 +1314,7 @@ ** has anything queued the task gets ** scheduled to handle it. 
*/ - if (!drbr_empty(vsi->ifp, txr->br)) + if (!drbr_empty(ifx->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); @@ -1368,8 +1378,8 @@ static void ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct ixl_pf *pf = ifx->back; struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); @@ -1380,7 +1390,7 @@ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; - if (!vsi->link_up) { + if (!ifx->link_up) { IXL_PF_UNLOCK(pf); return; } @@ -1461,8 +1471,8 @@ static int ixl_media_change(struct ifnet * ifp) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ifmedia *ifm = &vsi->media; + struct ixl_ifx *ifx = ifp->if_softc; + struct ifmedia *ifm = &ifx->media; INIT_DEBUGOUT("ixl_media_change: begin"); @@ -1485,7 +1495,7 @@ void ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype) { - struct ixl_vsi *vsi = que->vsi; + struct ixl_ifx *ifx = que->ifx; struct tx_ring *txr = &que->txr; struct i40e_filter_program_desc *FDIR; u32 ptype, dtype; @@ -1521,7 +1531,7 @@ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); - ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; + ptype |= ifx->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; dtype = I40E_TX_DESC_DTYPE_FILTER_PROG; @@ -1549,10 +1559,10 @@ static void -ixl_set_promisc(struct ixl_vsi *vsi) +ixl_set_promisc(struct ixl_ifx *ifx) { - struct ifnet *ifp = vsi->ifp; - struct i40e_hw *hw = vsi->hw; + struct ifnet *ifp = ifx->ifp; + struct i40e_hw *hw = ifx->hw; int err, mcnt = 0; bool uni = FALSE, multi = FALSE; @@ -1577,9 +1587,9 @@ uni = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, - vsi->seid, uni, NULL); + ifx->vsi.seid, uni, NULL); err = i40e_aq_set_vsi_multicast_promiscuous(hw, - vsi->seid, multi, NULL); + ifx->vsi.seid, multi, NULL); return; } @@ -1590,11 +1600,11 @@ * 
*********************************************************************/ static void -ixl_add_multi(struct ixl_vsi *vsi) +ixl_add_multi(struct ixl_ifx *ifx) { struct ifmultiaddr *ifma; - struct ifnet *ifp = vsi->ifp; - struct i40e_hw *hw = vsi->hw; + struct ifnet *ifp = ifx->ifp; + struct i40e_hw *hw = ifx->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); @@ -1613,9 +1623,9 @@ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ - ixl_del_hw_filters(vsi, mcnt); + ixl_del_hw_filters(&ifx->vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, - vsi->seid, TRUE, NULL); + ifx->vsi.seid, TRUE, NULL); return; } @@ -1624,14 +1634,14 @@ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; - ixl_add_mc_filter(vsi, + ixl_add_mc_filter(ifx, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); - ixl_add_hw_filters(vsi, flags, mcnt); + ixl_add_hw_filters(&ifx->vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); @@ -1639,9 +1649,9 @@ } static void -ixl_del_multi(struct ixl_vsi *vsi) +ixl_del_multi(struct ixl_ifx *ifx) { - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp = ifx->ifp; struct ifmultiaddr *ifma; struct ixl_mac_filter *f; int mcnt = 0; @@ -1651,7 +1661,7 @@ /* Search for removed multicast addresses */ if_maddr_rlock(ifp); - SLIST_FOREACH(f, &vsi->ftl, next) { + SLIST_FOREACH(f, &ifx->vsi.ftl, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { @@ -1672,7 +1682,7 @@ if_maddr_runlock(ifp); if (mcnt > 0) - ixl_del_hw_filters(vsi, mcnt); + ixl_del_hw_filters(&ifx->vsi, mcnt); } @@ -1689,8 +1699,8 @@ { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct 
ixl_queue *que = ifx->queues; device_t dev = pf->dev; int hung = 0; u32 mask; @@ -1709,7 +1719,7 @@ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); - for (int i = 0; i < vsi->num_queues; i++,que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++,que++) { /* Any queues with outstanding work get a sw irq */ if (que->busy) wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); @@ -1721,12 +1731,12 @@ if (que->busy == IXL_QUEUE_HUNG) { ++hung; /* Mark the queue as inactive */ - vsi->active_queues &= ~((u64)1 << que->me); + ifx->active_queues &= ~((u64)1 << que->me); continue; } else { /* Check if we've come back from hung */ - if ((vsi->active_queues & ((u64)1 << que->me)) == 0) - vsi->active_queues |= ((u64)1 << que->me); + if ((ifx->active_queues & ((u64)1 << que->me)) == 0) + ifx->active_queues |= ((u64)1 << que->me); } if (que->busy >= IXL_MAX_TX_BUSY) { #ifdef IXL_DEBUG @@ -1738,7 +1748,7 @@ } } /* Only reinit if all queues show hung */ - if (hung == vsi->num_queues) + if (hung == ifx->vsi.num_queues) goto hung; callout_reset(&pf->timer, hz, ixl_local_timer, pf); @@ -1757,24 +1767,24 @@ static void ixl_update_link_status(struct ixl_pf *pf) { - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_ifx *ifx = &pf->ifx; struct i40e_hw *hw = &pf->hw; - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp = ifx->ifp; device_t dev = pf->dev; enum i40e_fc_mode fc; - if (vsi->link_up){ - if (vsi->link_active == FALSE) { + if (ifx->link_up){ + if (ifx->link_active == FALSE) { i40e_aq_get_link_info(hw, TRUE, NULL, NULL); if (bootverbose) { fc = hw->fc.current_mode; device_printf(dev,"Link is up %d Gbps %s," " Flow Control: %s\n", - ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10), + ((ifx->link_speed == I40E_LINK_SPEED_40GB)? 
40:10), "Full Duplex", ixl_fc_string[fc]); } - vsi->link_active = TRUE; + ifx->link_active = TRUE; /* ** Warn user if link speed on NPAR enabled ** partition is not at least 10GB @@ -1787,11 +1797,11 @@ if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ - if (vsi->link_active == TRUE) { + if (ifx->link_active == TRUE) { if (bootverbose) device_printf(dev,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); - vsi->link_active = FALSE; + ifx->link_active = FALSE; } } @@ -1808,14 +1818,14 @@ static void ixl_stop(struct ixl_pf *pf) { - struct ixl_vsi *vsi = &pf->vsi; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = &pf->ifx; + struct ifnet *ifp = ifx->ifp; mtx_assert(&pf->pf_mtx, MA_OWNED); INIT_DEBUGOUT("ixl_stop: begin\n"); - ixl_disable_intr(vsi); - ixl_disable_rings(vsi); + ixl_disable_intr(ifx); + ixl_disable_rings(&ifx->vsi); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); @@ -1836,8 +1846,8 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf) { device_t dev = pf->dev; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; int error, rid = 0; if (pf->msix == 1) @@ -1885,8 +1895,8 @@ ixl_assign_vsi_msix(struct ixl_pf *pf) { device_t dev = pf->dev; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; struct tx_ring *txr; int error, rid, vector = 0; @@ -1919,7 +1929,7 @@ ++vector; /* Now set up the stations */ - for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; @@ -2049,7 +2059,7 @@ device_printf(pf->dev, "Using MSIX interrupts with %d vectors\n", vectors); pf->msix = vectors; - pf->vsi.num_queues = queues; + pf->ifx.vsi.num_queues = queues; #ifdef RSS /* * If 
we're doing RSS, the number of queues needs to @@ -2073,7 +2083,7 @@ } msi: vectors = pci_msi_count(dev); - pf->vsi.num_queues = 1; + pf->ifx.vsi.num_queues = 1; pf->msix = 1; ixl_max_queues = 1; ixl_enable_msix = 0; @@ -2094,7 +2104,7 @@ ixl_configure_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_ifx *ifx = &pf->ifx; u32 reg; u16 vector = 1; @@ -2121,7 +2131,7 @@ wr32(hw, I40E_PFINT_STAT_CTL0, 0); /* Next configure the queues */ - for (int i = 0; i < vsi->num_queues; i++, vector++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, vector++) { wr32(hw, I40E_PFINT_DYN_CTLN(i), i); wr32(hw, I40E_PFINT_LNKLSTN(i), i); @@ -2137,7 +2147,7 @@ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - if (i == (vsi->num_queues - 1)) + if (i == (ifx->vsi.num_queues - 1)) reg |= (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); @@ -2211,27 +2221,27 @@ ixl_configure_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; - vsi->rx_itr_setting = ixl_rx_itr; + ifx->rx_itr_setting = ixl_rx_itr; if (ixl_dynamic_rx_itr) - vsi->rx_itr_setting |= IXL_ITR_DYNAMIC; - vsi->tx_itr_setting = ixl_tx_itr; + ifx->rx_itr_setting |= IXL_ITR_DYNAMIC; + ifx->tx_itr_setting = ixl_tx_itr; if (ixl_dynamic_tx_itr) - vsi->tx_itr_setting |= IXL_ITR_DYNAMIC; + ifx->tx_itr_setting |= IXL_ITR_DYNAMIC; - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), - vsi->rx_itr_setting); - rxr->itr = vsi->rx_itr_setting; + ifx->rx_itr_setting); + rxr->itr = ifx->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; wr32(hw, 
I40E_PFINT_ITRN(IXL_TX_ITR, i), - vsi->tx_itr_setting); - txr->itr = vsi->tx_itr_setting; + ifx->tx_itr_setting); + txr->itr = ifx->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } @@ -2274,8 +2284,8 @@ static void ixl_free_pci_resources(struct ixl_pf * pf) { - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; device_t dev = pf->dev; int rid, memrid; @@ -2288,7 +2298,7 @@ /* ** Release all msix VSI resources: */ - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); @@ -2327,32 +2337,32 @@ } static void -ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type) +ixl_add_ifmedia(struct ixl_ifx *ifx, u32 phy_type) { /* Display supported media types */ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_type & (1 << 
I40E_PHY_TYPE_40GBASE_CR4_CU) || phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); } /********************************************************************* @@ -2361,17 +2371,17 @@ * **********************************************************************/ static int -ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) +ixl_setup_interface(device_t dev, struct ixl_ifx *ifx) { struct ifnet *ifp; - struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct i40e_hw *hw = ifx->hw; + struct ixl_queue *que = ifx->queues; struct i40e_aq_get_phy_abilities_resp abilities_resp; enum i40e_status_code aq_error = 0; INIT_DEBUGOUT("ixl_setup_interface: begin"); - ifp = vsi->ifp = if_alloc(IFT_ETHER); + ifp = ifx->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); @@ -2380,7 +2390,7 @@ ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 4000000000; // ?? 
ifp->if_init = ixl_init; - ifp->if_softc = vsi; + ifp->if_softc = ifx; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixl_ioctl; @@ -2394,7 +2404,7 @@ ifp->if_snd.ifq_maxlen = que->num_desc - 2; - vsi->max_frame_size = + ifx->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; @@ -2430,7 +2440,7 @@ * Specify the media types supported by this adapter and register * callbacks to update media and link information */ - ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, + ifmedia_init(&ifx->media, IFM_IMASK, ixl_media_change, ixl_media_status); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL); @@ -2442,16 +2452,16 @@ if (aq_error == I40E_ERR_UNKNOWN_PHY) device_printf(dev, "Unknown PHY type detected!\n"); else - ixl_add_ifmedia(vsi, abilities_resp.phy_type); + ixl_add_ifmedia(ifx, abilities_resp.phy_type); } else if (aq_error) { device_printf(dev, "Error getting supported media types, err %d," " AQ error %d\n", aq_error, hw->aq.asq_last_status); } else - ixl_add_ifmedia(vsi, abilities_resp.phy_type); + ixl_add_ifmedia(ifx, abilities_resp.phy_type); /* Use autoselect media by default */ - ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); + ifmedia_add(&ifx->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&ifx->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, hw->mac.addr); @@ -2477,10 +2487,10 @@ * **********************************************************************/ static int -ixl_setup_vsi(struct ixl_vsi *vsi) +ixl_setup_vsi(struct ixl_ifx *ifx) { - struct i40e_hw *hw = vsi->hw; - device_t dev = vsi->dev; + struct i40e_hw *hw = ifx->hw; + device_t dev = ifx->dev; struct i40e_aqc_get_switch_config_resp *sw_config; struct i40e_vsi_context ctxt; u8 aq_buf[I40E_AQ_LARGE_BUF]; @@ -2504,10 +2514,10 @@ sw_config->element[0].downlink_seid); #endif /* Save off this important value */ - vsi->seid = 
sw_config->element[0].seid; + ifx->vsi.seid = sw_config->element[0].seid; memset(&ctxt, 0, sizeof(ctxt)); - ctxt.seid = vsi->seid; + ctxt.seid = ifx->vsi.seid; ctxt.pf_num = hw->pf_id; ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (ret) { @@ -2536,18 +2546,18 @@ /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; - if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) + if (ifx->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; else ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; /* Keep copy of VSI info in VSI for statistic counters */ - memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + memcpy(&ifx->vsi.info, &ctxt.info, sizeof(ctxt.info)); /* Reset VSI statistics */ - ixl_vsi_reset_stats(vsi); - vsi->hw_filters_add = 0; - vsi->hw_filters_del = 0; + ixl_vsi_reset_stats(&ifx->vsi); + ifx->vsi.hw_filters_add = 0; + ifx->vsi.hw_filters_del = 0; ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) @@ -2565,15 +2575,15 @@ * **********************************************************************/ static int -ixl_initialize_vsi(struct ixl_vsi *vsi) +ixl_initialize_vsi(struct ixl_ifx *ifx) { - struct ixl_queue *que = vsi->queues; - device_t dev = vsi->dev; - struct i40e_hw *hw = vsi->hw; + struct ixl_queue *que = ifx->queues; + device_t dev = ifx->dev; + struct i40e_hw *hw = ifx->hw; int err = 0; - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; struct i40e_hmc_obj_txq tctx; @@ -2589,7 +2599,7 @@ tctx.base = (txr->dma.pa/128); tctx.qlen = que->num_desc; tctx.fc_ena = 0; - tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ + tctx.rdylist = ifx->vsi.info.qs_handle[0]; /* index is TC */ /* Enable HEAD writeback */ tctx.head_wb_ena = 1; tctx.head_wb_addr = txr->dma.pa + @@ 
-2616,7 +2626,7 @@ ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ - if (vsi->max_frame_size <= 2048) + if (ifx->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; @@ -2628,8 +2638,8 @@ rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; /* ignore header split for now */ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; - rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? - vsi->max_frame_size : max_rxmax; + rctx.rxmax = (ifx->max_frame_size < max_rxmax) ? + ifx->max_frame_size : max_rxmax; rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no HDR split initially */ @@ -2662,8 +2672,8 @@ device_printf(dev, "Fail in init_rx_ring %d\n", i); break; } - wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0); - wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); + wr32(ifx->hw, I40E_QRX_TAIL(que->me), 0); + wr32(ifx->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); } return (err); } @@ -2675,14 +2685,13 @@ * **********************************************************************/ void -ixl_free_vsi(struct ixl_vsi *vsi) +ixl_free_ifx(struct ixl_ifx *ifx) { - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct ixl_queue *que = vsi->queues; - struct ixl_mac_filter *f; + struct ixl_pf *pf = (struct ixl_pf *)ifx->back; + struct ixl_queue *que = ifx->queues; /* Free station queues */ - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; @@ -2705,9 +2714,17 @@ IXL_RX_LOCK_DESTROY(rxr); } - free(vsi->queues, M_DEVBUF); + free(ifx->queues, M_DEVBUF); /* Free VSI filter list */ + ixl_free_mac_filters(&ifx->vsi); +} + +static void +ixl_free_mac_filters(struct ixl_vsi *vsi) +{ + struct ixl_mac_filter *f; + while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); @@ -2727,35 +2744,36 @@ ixl_setup_stations(struct ixl_pf *pf) { device_t dev = pf->dev; - 
struct ixl_vsi *vsi; + struct ixl_ifx *ifx; struct ixl_queue *que; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; - vsi = &pf->vsi; - vsi->back = (void *)pf; - vsi->hw = &pf->hw; - vsi->id = 0; - vsi->num_vlans = 0; + ifx = &pf->ifx; + ifx->back = (void *)pf; + ifx->hw = &pf->hw; + ifx->id = 0; + ifx->vsi.num_vlans = 0; + ifx->vsi.back = pf; /* Get memory for the station queues */ - if (!(vsi->queues = + if (!(ifx->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * - vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + ifx->vsi.num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto early; } - for (int i = 0; i < vsi->num_queues; i++) { - que = &vsi->queues[i]; + for (int i = 0; i < ifx->vsi.num_queues; i++) { + que = &ifx->queues[i]; que->num_desc = ixl_ringsz; que->me = i; - que->vsi = vsi; + que->ifx = ifx; /* mark the queue as active */ - vsi->active_queues |= (u64)1 << que->me; + ifx->active_queues |= (u64)1 << que->me; txr = &que->txr; txr->que = que; txr->tail = I40E_QTX_TAIL(que->me); @@ -2830,8 +2848,8 @@ return (0); fail: - for (int i = 0; i < vsi->num_queues; i++) { - que = &vsi->queues[i]; + for (int i = 0; i < ifx->vsi.num_queues; i++) { + que = &ifx->queues[i]; rxr = &que->rxr; txr = &que->txr; if (rxr->base) @@ -2851,8 +2869,8 @@ static void ixl_set_queue_rx_itr(struct ixl_queue *que) { - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; @@ -2903,11 +2921,11 @@ que->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ - if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) - vsi->rx_itr_setting = ixl_rx_itr; + if (ifx->rx_itr_setting & IXL_ITR_DYNAMIC) + ifx->rx_itr_setting = ixl_rx_itr; /* Update the hardware if needed */ - if (rxr->itr != vsi->rx_itr_setting) { - rxr->itr = 
vsi->rx_itr_setting; + if (rxr->itr != ifx->rx_itr_setting) { + rxr->itr = ifx->rx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } @@ -2925,8 +2943,8 @@ static void ixl_set_queue_tx_itr(struct ixl_queue *que) { - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; @@ -2977,11 +2995,11 @@ } } else { /* We may have have toggled to non-dynamic */ - if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) - vsi->tx_itr_setting = ixl_tx_itr; + if (ifx->tx_itr_setting & IXL_ITR_DYNAMIC) + ifx->tx_itr_setting = ixl_tx_itr; /* Update the hardware if needed */ - if (txr->itr != vsi->tx_itr_setting) { - txr->itr = vsi->tx_itr_setting; + if (txr->itr != ifx->tx_itr_setting) { + txr->itr = ifx->tx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } @@ -2991,25 +3009,47 @@ return; } +#define QUEUE_NAME_LEN 32 + +static void +ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, + struct sysctl_ctx_list *ctx) +{ + struct sysctl_oid *tree; + struct sysctl_oid_list *child; + struct sysctl_oid_list *vsi_list; + char vsi_namebuf[QUEUE_NAME_LEN]; + + tree = device_get_sysctl_tree(pf->dev); + child = SYSCTL_CHILDREN(tree); + snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", + vsi->info.stat_counter_idx); + vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, vsi_namebuf, + CTLFLAG_RD, NULL, "VSI Number"); + vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); + + ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); +} static void ixl_add_hw_stats(struct ixl_pf *pf) { device_t dev = pf->dev; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *queues = vsi->queues; - struct i40e_eth_stats *vsi_stats = &vsi->eth_stats; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *queues = ifx->queues; struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct 
sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct sysctl_oid_list *vsi_list; - struct sysctl_oid *vsi_node, *queue_node; - struct sysctl_oid_list *vsi_list, *queue_list; + struct sysctl_oid *queue_node; + struct sysctl_oid_list *queue_list; struct tx_ring *txr; struct rx_ring *rxr; + char queue_namebuf[QUEUE_NAME_LEN]; /* Driver statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", @@ -3019,20 +3059,12 @@ CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); - /* VSI statistics */ -#define QUEUE_NAME_LEN 32 - char queue_namebuf[QUEUE_NAME_LEN]; - - // ERJ: Only one vsi now, re-do when >1 VSI enabled - // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx); - vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", - CTLFLAG_RD, NULL, "VSI-specific stats"); - vsi_list = SYSCTL_CHILDREN(vsi_node); - ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats); + ixl_add_vsi_sysctls(pf, &pf->ifx.vsi, ctx); + vsi_list = SYSCTL_CHILDREN(pf->ifx.vsi.vsi_node); /* Queue statistics */ - for (int q = 0; q < vsi->num_queues; q++) { + for (int q = 0; q < ifx->vsi.num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); @@ -3175,10 +3207,10 @@ ** ixl_config_rss - setup RSS ** - note this is done for the single vsi */ -static void ixl_config_rss(struct ixl_vsi *vsi) +static void ixl_config_rss(struct ixl_ifx *ifx) { - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct i40e_hw *hw = vsi->hw; + struct ixl_pf *pf = (struct ixl_pf *)ifx->back; + struct i40e_hw *hw = ifx->hw; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; @@ -3240,7 +3272,7 @@ /* Populate the LUT with max no. 
of queues in round robin fashion */ for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) { - if (j == vsi->num_queues) + if (j == ifx->vsi.num_queues) j = 0; #ifdef RSS /* @@ -3274,9 +3306,9 @@ static void ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = ifp->if_softc; - struct i40e_hw *hw = vsi->hw; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct i40e_hw *hw = ifx->hw; + struct ixl_pf *pf = (struct ixl_pf *)ifx->back; if (ifp->if_softc != arg) /* Not our event */ return; @@ -3285,8 +3317,8 @@ return; IXL_PF_LOCK(pf); - ++vsi->num_vlans; - ixl_add_filter(vsi, hw->mac.addr, vtag); + ++ifx->vsi.num_vlans; + ixl_add_filter(&ifx->vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } @@ -3298,9 +3330,9 @@ static void ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = ifp->if_softc; - struct i40e_hw *hw = vsi->hw; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct i40e_hw *hw = ifx->hw; + struct ixl_pf *pf = (struct ixl_pf *)ifx->back; if (ifp->if_softc != arg) return; @@ -3309,8 +3341,8 @@ return; IXL_PF_LOCK(pf); - --vsi->num_vlans; - ixl_del_filter(vsi, hw->mac.addr, vtag); + --ifx->vsi.num_vlans; + ixl_del_filter(&ifx->vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } @@ -3355,27 +3387,27 @@ ** needs to know about. 
*/ static void -ixl_init_filters(struct ixl_vsi *vsi) +ixl_init_filters(struct ixl_ifx *ifx) { + /* Add broadcast address */ - u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - ixl_add_filter(vsi, bc, IXL_VLAN_ANY); + ixl_add_filter(&ifx->vsi, ixl_bcast_addr, IXL_VLAN_ANY); } /* ** This routine adds mulicast filters */ static void -ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) +ixl_add_mc_filter(struct ixl_ifx *ifx, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist */ - f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); + f = ixl_find_filter(&ifx->vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; - f = ixl_get_filter(vsi); + f = ixl_get_filter(&ifx->vsi); if (f == NULL) { printf("WARNING: no filter available!!\n"); return; @@ -3395,10 +3427,14 @@ ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; - device_t dev = vsi->dev; + struct ixl_pf *pf; + device_t dev; DEBUGOUT("ixl_add_filter: begin"); + pf = vsi->back; + dev = pf->dev; + /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) @@ -3485,10 +3521,16 @@ { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; - struct i40e_hw *hw = vsi->hw; - device_t dev = vsi->dev; + struct ixl_pf *pf; + struct i40e_hw *hw; + device_t dev; int err, j = 0; + pf = vsi->back; + dev = pf->dev; + hw = &pf->hw; + IXL_PF_LOCK_ASSERT(pf); + a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { @@ -3535,13 +3577,18 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; - struct i40e_hw *hw = vsi->hw; - device_t dev = vsi->dev; + struct ixl_pf *pf; + struct i40e_hw *hw; + device_t dev; struct ixl_mac_filter *f, *f_temp; int err, j = 0; DEBUGOUT("ixl_del_hw_filters: begin\n"); + pf = vsi->back; + hw = &pf->hw; + dev = pf->dev; + d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, 
M_NOWAIT | M_ZERO); if (d == NULL) { @@ -3585,82 +3632,106 @@ return; } - -static void +static int ixl_enable_rings(struct ixl_vsi *vsi) { - struct i40e_hw *hw = vsi->hw; + struct ixl_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int index, error; u32 reg; + error = 0; for (int i = 0; i < vsi->num_queues; i++) { - i40e_pre_tx_queue_cfg(hw, i, TRUE); + index = vsi->first_queue + i; + i40e_pre_tx_queue_cfg(hw, index, TRUE); - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; - wr32(hw, I40E_QTX_ENA(i), reg); + wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } - if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) - printf("TX queue %d disabled!\n", i); + if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { + device_printf(pf->dev, "TX queue %d disabled!\n", + index); + error = ETIMEDOUT; + } - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; - wr32(hw, I40E_QRX_ENA(i), reg); + wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } - if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) - printf("RX queue %d disabled!\n", i); + if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { + device_printf(pf->dev, "RX queue %d disabled!\n", + index); + error = ETIMEDOUT; + } } + + return (error); } -static void +static int ixl_disable_rings(struct ixl_vsi *vsi) { - struct i40e_hw *hw = vsi->hw; + struct ixl_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int index, error; u32 reg; + error = 0; for (int i = 0; i < vsi->num_queues; i++) { - 
i40e_pre_tx_queue_cfg(hw, i, FALSE); + index = vsi->first_queue + i; + + i40e_pre_tx_queue_cfg(hw, index, FALSE); i40e_usec_delay(500); - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; - wr32(hw, I40E_QTX_ENA(i), reg); + wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } - if (reg & I40E_QTX_ENA_QENA_STAT_MASK) - printf("TX queue %d still enabled!\n", i); + if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { + device_printf(pf->dev, "TX queue %d still enabled!\n", + index); + error = ETIMEDOUT; + } - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; - wr32(hw, I40E_QRX_ENA(i), reg); + wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } - if (reg & I40E_QRX_ENA_QENA_STAT_MASK) - printf("RX queue %d still enabled!\n", i); + if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { + device_printf(pf->dev, "RX queue %d still enabled!\n", + index); + error = ETIMEDOUT; + } } + + return (error); } /** @@ -3736,28 +3807,28 @@ } static void -ixl_enable_intr(struct ixl_vsi *vsi) +ixl_enable_intr(struct ixl_ifx *ifx) { - struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct i40e_hw *hw = ifx->hw; + struct ixl_queue *que = ifx->queues; if (ixl_enable_msix) { ixl_enable_adminq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) ixl_enable_queue(hw, que->me); } else ixl_enable_legacy(hw); } static void -ixl_disable_intr(struct ixl_vsi *vsi) +ixl_disable_intr(struct ixl_ifx *ifx) { - struct i40e_hw *hw = vsi->hw; - struct ixl_queue 
*que = vsi->queues; + struct i40e_hw *hw = ifx->hw; + struct ixl_queue *que = ifx->queues; if (ixl_enable_msix) { ixl_disable_adminq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) ixl_disable_queue(hw, que->me); } else ixl_disable_legacy(hw); @@ -3834,7 +3905,7 @@ ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_ifx *ifx = &pf->ifx; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; @@ -3995,12 +4066,12 @@ pf->stat_offsets_loaded = true; /* End hw stats */ - /* Update vsi stats */ - ixl_update_eth_stats(vsi); + /* Update ifx stats */ + ixl_update_ifx_stats(ifx); /* OS statistics */ // ERJ - these are per-port, update all vsis? - IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes); + IXL_SET_IERRORS(ifx, nsd->crc_errors + nsd->illegal_bytes); } /* @@ -4012,7 +4083,7 @@ { struct ixl_pf *pf = context; struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_ifx *ifx = &pf->ifx; struct i40e_arq_event_info event; i40e_status ret; u32 reg, loop = 0; @@ -4034,7 +4105,7 @@ opcode = LE16_TO_CPU(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: - vsi->link_up = ixl_config_link(hw); + ifx->link_up = ixl_config_link(hw); ixl_update_link_status(pf); break; case i40e_aqc_opc_send_msg_to_pf: @@ -4059,7 +4130,7 @@ if (pf->msix > 1) ixl_enable_adminq(&pf->hw); else - ixl_enable_intr(vsi); + ixl_enable_intr(ifx); } static int @@ -4085,8 +4156,8 @@ ixl_print_debug_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &pf->ifx; + struct ixl_queue *que = ifx->queues; struct rx_ring *rxr = &que->rxr; struct tx_ring *txr = &que->txr; u32 reg; @@ -4136,8 +4207,6 @@ struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; - int i; - 
uint64_t tx_discards; struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; @@ -4187,28 +4256,46 @@ vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; +} + +static void +ixl_update_ifx_stats(struct ixl_ifx *ifx) +{ + struct ixl_pf *pf; + struct ifnet *ifp; + struct i40e_eth_stats *es; + struct i40e_hw_port_stats *nsd; + uint64_t tx_discards; + int i; + + pf = ifx->back; + ifp = ifx->ifp; + es = &ifx->vsi.eth_stats; + nsd = &pf->stats; + + ixl_update_eth_stats(&ifx->vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; - for (i = 0; i < vsi->num_queues; i++) - tx_discards += vsi->queues[i].txr.br->br_drops; + for (i = 0; i < ifx->vsi.num_queues; i++) + tx_discards += ifx->queues[i].txr.br->br_drops; /* Update ifnet stats */ - IXL_SET_IPACKETS(vsi, es->rx_unicast + + IXL_SET_IPACKETS(ifx, es->rx_unicast + es->rx_multicast + es->rx_broadcast); - IXL_SET_OPACKETS(vsi, es->tx_unicast + + IXL_SET_OPACKETS(ifx, es->tx_unicast + es->tx_multicast + es->tx_broadcast); - IXL_SET_IBYTES(vsi, es->rx_bytes); - IXL_SET_OBYTES(vsi, es->tx_bytes); - IXL_SET_IMCASTS(vsi, es->rx_multicast); - IXL_SET_OMCASTS(vsi, es->tx_multicast); - - IXL_SET_OERRORS(vsi, es->tx_errors); - IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); - IXL_SET_OQDROPS(vsi, tx_discards); - IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); - IXL_SET_COLLISIONS(vsi, 0); + IXL_SET_IBYTES(ifx, es->rx_bytes); + IXL_SET_OBYTES(ifx, es->tx_bytes); + IXL_SET_IMCASTS(ifx, es->rx_multicast); + IXL_SET_OMCASTS(ifx, es->tx_multicast); + + IXL_SET_OERRORS(ifx, es->tx_errors); + IXL_SET_IQDROPS(ifx, es->rx_discards + nsd->eth.rx_discards); + IXL_SET_OQDROPS(ifx, tx_discards); + IXL_SET_NOPROTO(ifx, es->rx_unknown_protocol); + IXL_SET_COLLISIONS(ifx, 0); } /** @@ -4664,7 +4751,7 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = 
&pf->ifx.vsi; struct ixl_mac_filter *f; char *buf, *buf_i; @@ -4937,7 +5024,7 @@ sbuf_cat(buf, "\n"); // set to queue 1? - struct ixl_queue *que = pf->vsi.queues; + struct ixl_queue *que = pf->ifx.queues; struct tx_ring *txr = &(que[1].txr); struct i40e_tx_desc *txd = &txr->base[desc_idx]; Index: sys/dev/ixl/if_ixlv.c =================================================================== --- sys/dev/ixl/if_ixlv.c +++ sys/dev/ixl/if_ixlv.c @@ -86,9 +86,9 @@ static int ixlv_setup_queues(struct ixlv_sc *); static void ixlv_config_rss(struct ixlv_sc *); static void ixlv_stop(struct ixlv_sc *); -static void ixlv_add_multi(struct ixl_vsi *); -static void ixlv_del_multi(struct ixl_vsi *); -static void ixlv_free_queues(struct ixl_vsi *); +static void ixlv_add_multi(struct ixl_ifx *); +static void ixlv_del_multi(struct ixl_ifx *); +static void ixlv_free_queues(struct ixl_ifx *); static int ixlv_setup_interface(device_t, struct ixlv_sc *); static int ixlv_media_change(struct ifnet *); @@ -126,7 +126,7 @@ static int ixlv_setup_vc(struct ixlv_sc *); static int ixlv_vf_config(struct ixlv_sc *); -static void ixlv_cap_txcsum_tso(struct ixl_vsi *, +static void ixlv_cap_txcsum_tso(struct ixl_ifx *, struct ifnet *, int); static void ixlv_add_sysctls(struct ixlv_sc *); @@ -279,7 +279,7 @@ { struct ixlv_sc *sc; struct i40e_hw *hw; - struct ixl_vsi *vsi; + struct ixl_ifx *ifx; int error = 0; INIT_DBG_DEV(dev, "begin"); @@ -288,8 +288,8 @@ sc = device_get_softc(dev); sc->dev = sc->osdep.dev = dev; hw = &sc->hw; - vsi = &sc->vsi; - vsi->dev = dev; + ifx = &sc->ifx; + ifx->dev = dev; /* Initialize hw struct */ ixlv_init_hw(sc); @@ -389,9 +389,9 @@ bcopy(addr, hw->mac.addr, sizeof(addr)); } - vsi->id = sc->vsi_res->vsi_id; - vsi->back = (void *)sc; - vsi->link_up = TRUE; + ifx->id = sc->vsi_res->vsi_id; + ifx->back = (void *)sc; + ifx->link_up = TRUE; /* This allocates the memory and early settings */ if (ixlv_setup_queues(sc) != 0) { @@ -418,14 +418,14 @@ ixlv_init_taskqueue(sc); /* 
Initialize stats */ - bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); + bzero(&sc->ifx.vsi.eth_stats, sizeof(struct i40e_eth_stats)); ixlv_add_sysctls(sc); /* Register for VLAN events */ - vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); - vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); + ifx->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixlv_register_vlan, ifx, EVENTHANDLER_PRI_FIRST); + ifx->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixlv_unregister_vlan, ifx, EVENTHANDLER_PRI_FIRST); /* We want AQ enabled early */ ixlv_enable_adminq_irq(hw); @@ -439,7 +439,7 @@ return (error); out: - ixlv_free_queues(vsi); + ixlv_free_queues(ifx); err_res_buf: free(sc->vf_res, M_DEVBUF); err_aq: @@ -467,40 +467,40 @@ ixlv_detach(device_t dev) { struct ixlv_sc *sc = device_get_softc(dev); - struct ixl_vsi *vsi = &sc->vsi; + struct ixl_ifx *ifx = &sc->ifx; INIT_DBG_DEV(dev, "begin"); /* Make sure VLANS are not using driver */ - if (vsi->ifp->if_vlantrunk != NULL) { - device_printf(dev, "Vlan in use, detach first\n"); + if (ifx->ifp->if_vlantrunk != NULL) { + if_printf(ifx->ifp, "Vlan in use, detach first\n"); INIT_DBG_DEV(dev, "end"); return (EBUSY); } /* Stop driver */ - ether_ifdetach(vsi->ifp); - if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { + ether_ifdetach(ifx->ifp); + if (ifx->ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); ixlv_stop(sc); mtx_unlock(&sc->mtx); } /* Unregister VLAN events */ - if (vsi->vlan_attach != NULL) - EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); - if (vsi->vlan_detach != NULL) - EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); + if (ifx->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, ifx->vlan_attach); + if (ifx->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, ifx->vlan_detach); /* Drain VC mgr */ callout_drain(&sc->vc_mgr.callout); 
i40e_shutdown_adminq(&sc->hw); taskqueue_free(sc->tq); - if_free(vsi->ifp); + if_free(ifx->ifp); free(sc->vf_res, M_DEVBUF); ixlv_free_pci_resources(sc); - ixlv_free_queues(vsi); + ixlv_free_queues(ifx); mtx_destroy(&sc->mtx); ixlv_free_filters(sc); @@ -536,7 +536,7 @@ * need to tweak them */ static void -ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) +ixlv_cap_txcsum_tso(struct ixl_ifx *ifx, struct ifnet *ifp, int mask) { /* Enable/disable TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) @@ -544,14 +544,14 @@ if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ - if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + if (ifx->flags & IXL_FLAGS_KEEP_TSO4) { + ifx->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + ifx->flags &= ~IXL_FLAGS_KEEP_TSO4; if_printf(ifp, "TSO4 requires txcsum, enabling both...\n"); } @@ -564,7 +564,7 @@ } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { - vsi->flags |= IXL_FLAGS_KEEP_TSO4; + ifx->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); if_printf(ifp, "TSO4 requires txcsum, disabling both...\n"); @@ -577,13 +577,13 @@ && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; - if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + if (ifx->flags & IXL_FLAGS_KEEP_TSO6) { + ifx->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + ifx->flags &= ~IXL_FLAGS_KEEP_TSO6; if_printf(ifp, "TSO6 requires txcsum6, enabling both...\n"); } @@ -596,7 +596,7 @@ } else if ((ifp->if_capenable 
& IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { - vsi->flags |= IXL_FLAGS_KEEP_TSO6; + ifx->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); if_printf(ifp, "TSO6 requires txcsum6, disabling both...\n"); @@ -617,8 +617,8 @@ static int ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixlv_sc *sc = vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct ixlv_sc *sc = ifx->back; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; @@ -646,7 +646,7 @@ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - ixlv_init(vsi); + ixlv_init(ifx); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); @@ -666,7 +666,7 @@ IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu); // ERJ: Interestingly enough, these types don't match ifp->if_mtu = (u_long)ifr->ifr_mtu; - vsi->max_frame_size = + ifx->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; ixlv_init_locked(sc); @@ -689,9 +689,9 @@ IOCTL_DBG_IF2(ifp, "SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); - ixlv_disable_intr(vsi); - ixlv_add_multi(vsi); - ixlv_enable_intr(vsi); + ixlv_disable_intr(ifx); + ixlv_add_multi(ifx); + ixlv_enable_intr(ifx); mtx_unlock(&sc->mtx); } break; @@ -699,9 +699,9 @@ IOCTL_DBG_IF2(ifp, "SIOCDELMULTI"); if (sc->init_state == IXLV_RUNNING) { mtx_lock(&sc->mtx); - ixlv_disable_intr(vsi); - ixlv_del_multi(vsi); - ixlv_enable_intr(vsi); + ixlv_disable_intr(ifx); + ixlv_del_multi(ifx); + ixlv_enable_intr(ifx); mtx_unlock(&sc->mtx); } break; @@ -715,7 +715,7 @@ int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)"); - ixlv_cap_txcsum_tso(vsi, ifp, mask); + ixlv_cap_txcsum_tso(ifx, ifp, mask); if (mask & IFCAP_RXCSUM) 
ifp->if_capenable ^= IFCAP_RXCSUM; @@ -730,7 +730,7 @@ if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ixlv_init(vsi); + ixlv_init(ifx); } VLAN_CAPABILITIES(ifp); @@ -758,8 +758,8 @@ ixlv_reinit_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = &sc->ifx; + struct ifnet *ifp = ifx->ifp; struct ixlv_mac_filter *mf, *mf_temp; struct ixlv_vlan_filter *vf; int error = 0; @@ -788,7 +788,7 @@ } else mf->flags |= IXL_FILTER_ADD; } - if (vsi->num_vlans != 0) + if (ifx->vsi.num_vlans != 0) SLIST_FOREACH(vf, sc->vlan_filters, next) vf->flags = IXL_FILTER_ADD; else { /* clean any stale filters */ @@ -820,7 +820,7 @@ * in that case. */ if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) { - if_printf(sc->vsi.ifp, + if_printf(sc->ifx.ifp, "Error %d waiting for PF to complete operation %d\n", code, cmd->request); } @@ -830,9 +830,9 @@ ixlv_init_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; + struct ifnet *ifp = ifx->ifp; int error = 0; INIT_DBG_IF(ifp, "begin"); @@ -881,12 +881,12 @@ ixlv_setup_vlan_filters(sc); /* Prepare the queues for operation */ - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; ixl_init_tx_ring(que); - if (vsi->max_frame_size <= 2048) + if (ifx->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; @@ -924,8 +924,8 @@ void ixlv_init(void *arg) { - struct ixl_vsi *vsi = (struct ixl_vsi *)arg; - struct ixlv_sc *sc = vsi->back; + struct ixl_ifx *ifx = (struct ixl_ifx *)arg; + struct ixlv_sc *sc = ifx->back; int retries = 0; mtx_lock(&sc->mtx); @@ -933,12 +933,12 @@ 
mtx_unlock(&sc->mtx); /* Wait for init_locked to finish */ - while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) + while (!(ifx->ifp->if_drv_flags & IFF_DRV_RUNNING) && ++retries < 100) { i40e_msec_delay(10); } if (retries >= IXLV_AQ_MAX_ERR) - if_printf(vsi->ifp, + if_printf(ifx->ifp, "Init failed to complete in alloted time!\n"); } @@ -1210,7 +1210,7 @@ device_printf(sc->dev, "Using MSIX interrupts with %d vectors\n", vectors); sc->msix = vectors; - sc->vsi.num_queues = queues; + sc->ifx.vsi.num_queues = queues; } /* @@ -1301,8 +1301,8 @@ static void ixlv_free_pci_resources(struct ixlv_sc *sc) { - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; device_t dev = sc->dev; /* We may get here before stations are setup */ @@ -1312,7 +1312,7 @@ /* ** Release all msix queue resources: */ - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { int rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); @@ -1371,12 +1371,12 @@ ixlv_assign_msix(struct ixlv_sc *sc) { device_t dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; struct tx_ring *txr; int error, rid, vector = 1; - for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; @@ -1403,7 +1403,7 @@ #endif bus_bind_intr(dev, que->res, cpu_id); que->msix = vector; - vsi->que_mask |= (u64)(1 << que->msix); + ifx->que_mask |= (u64)(1 << que->msix); TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixlv_handle_que, que); que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT, @@ -1491,12 +1491,12 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc) { struct ifnet *ifp; - struct ixl_vsi 
*vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; INIT_DBG_DEV(dev, "begin"); - ifp = vsi->ifp = if_alloc(IFT_ETHER); + ifp = ifx->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "%s: could not allocate ifnet" " structure!\n", __func__); @@ -1508,7 +1508,7 @@ ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 4000000000; // ?? ifp->if_init = ixlv_init; - ifp->if_softc = vsi; + ifp->if_softc = ifx; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixlv_ioctl; @@ -1523,7 +1523,7 @@ ether_ifattach(ifp, sc->hw.mac.addr); - vsi->max_frame_size = + ifx->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; @@ -1577,34 +1577,34 @@ ixlv_setup_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; - struct ixl_vsi *vsi; + struct ixl_ifx *ifx; struct ixl_queue *que; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; - vsi = &sc->vsi; - vsi->back = (void *)sc; - vsi->hw = &sc->hw; - vsi->num_vlans = 0; + ifx = &sc->ifx; + ifx->back = (void *)sc; + ifx->hw = &sc->hw; + ifx->vsi.num_vlans = 0; /* Get memory for the station queues */ - if (!(vsi->queues = + if (!(ifx->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * - vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + ifx->vsi.num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto early; } - for (int i = 0; i < vsi->num_queues; i++) { - que = &vsi->queues[i]; + for (int i = 0; i < ifx->vsi.num_queues; i++) { + que = &ifx->queues[i]; que->num_desc = ixlv_ringsz; que->me = i; - que->vsi = vsi; + que->ifx = ifx; /* mark the queue as active */ - vsi->active_queues |= (u64)1 << que->me; + ifx->active_queues |= (u64)1 << que->me; txr = &que->txr; txr->que = que; @@ -1682,9 +1682,9 @@ return (0); fail: - free(vsi->queues, M_DEVBUF); - for (int i = 0; i < vsi->num_queues; i++) { 
- que = &vsi->queues[i]; + free(ifx->queues, M_DEVBUF); + for (int i = 0; i < ifx->vsi.num_queues; i++) { + que = &ifx->queues[i]; rxr = &que->rxr; txr = &que->txr; if (rxr->base) @@ -1707,12 +1707,12 @@ static void ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixlv_sc *sc = vsi->back; + struct ixl_ifx *ifx = arg; + struct ixlv_sc *sc = ifx->back; struct ixlv_vlan_filter *v; - if (ifp->if_softc != arg) /* Not our event */ + if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ @@ -1725,7 +1725,7 @@ } mtx_lock(&sc->mtx); - ++vsi->num_vlans; + ++ifx->vsi.num_vlans; v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INSERT_HEAD(sc->vlan_filters, v, next); v->vlan = vtag; @@ -1744,12 +1744,12 @@ static void ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixlv_sc *sc = vsi->back; + struct ixl_ifx *ifx = arg; + struct ixlv_sc *sc = ifx->back; struct ixlv_vlan_filter *v; int i = 0; - if (ifp->if_softc != arg) + if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ @@ -1760,7 +1760,7 @@ if (v->vlan == vtag) { v->flags = IXL_FILTER_DEL; ++i; - --vsi->num_vlans; + --ifx->vsi.num_vlans; } } if (i) @@ -1868,24 +1868,24 @@ } void -ixlv_enable_intr(struct ixl_vsi *vsi) +ixlv_enable_intr(struct ixl_ifx *ifx) { - struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct i40e_hw *hw = ifx->hw; + struct ixl_queue *que = ifx->queues; ixlv_enable_adminq_irq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) ixlv_enable_queue_irq(hw, que->me); } void -ixlv_disable_intr(struct ixl_vsi *vsi) +ixlv_disable_intr(struct ixl_ifx *ifx) { - struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct i40e_hw *hw = ifx->hw; + struct ixl_queue *que = ifx->queues; 
ixlv_disable_adminq_irq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) ixlv_disable_queue_irq(hw, que->me); } @@ -1938,8 +1938,8 @@ static void ixlv_set_queue_rx_itr(struct ixl_queue *que) { - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; @@ -1990,11 +1990,11 @@ que->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ - if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) - vsi->rx_itr_setting = ixlv_rx_itr; + if (ifx->rx_itr_setting & IXL_ITR_DYNAMIC) + ifx->rx_itr_setting = ixlv_rx_itr; /* Update the hardware if needed */ - if (rxr->itr != vsi->rx_itr_setting) { - rxr->itr = vsi->rx_itr_setting; + if (rxr->itr != ifx->rx_itr_setting) { + rxr->itr = ifx->rx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, que->me), rxr->itr); } @@ -2012,8 +2012,8 @@ static void ixlv_set_queue_tx_itr(struct ixl_queue *que) { - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; @@ -2064,11 +2064,11 @@ } } else { /* We may have have toggled to non-dynamic */ - if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) - vsi->tx_itr_setting = ixlv_tx_itr; + if (ifx->tx_itr_setting & IXL_ITR_DYNAMIC) + ifx->tx_itr_setting = ixlv_tx_itr; /* Update the hardware if needed */ - if (txr->itr != vsi->tx_itr_setting) { - txr->itr = vsi->tx_itr_setting; + if (txr->itr != ifx->tx_itr_setting) { + txr->itr = ifx->tx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, que->me), txr->itr); } @@ -2088,10 +2088,10 @@ ixlv_handle_que(void *context, int pending) { struct ixl_queue *que = context; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = 
&que->txr; - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp = ifx->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { @@ -2122,13 +2122,13 @@ ixlv_msix_que(void *arg) { struct ixl_queue *que = arg; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Spurious interrupts are ignored */ - if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) + if (!(ifx->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; ++que->irqs; @@ -2142,7 +2142,7 @@ ** has anything queued the task gets ** scheduled to handle it. */ - if (!drbr_empty(vsi->ifp, txr->br)) + if (!drbr_empty(ifx->ifp, txr->br)) more_tx = 1; mtx_unlock(&txr->mtx); @@ -2169,8 +2169,8 @@ static void ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ixlv_sc *sc = vsi->back; + struct ixl_ifx *ifx = ifp->if_softc; + struct ixlv_sc *sc = ifx->back; INIT_DBG_IF(ifp, "begin"); @@ -2181,7 +2181,7 @@ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; - if (!vsi->link_up) { + if (!ifx->link_up) { mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end: link not up"); return; @@ -2206,8 +2206,8 @@ static int ixlv_media_change(struct ifnet * ifp) { - struct ixl_vsi *vsi = ifp->if_softc; - struct ifmedia *ifm = &vsi->media; + struct ixl_ifx *ifx = ifp->if_softc; + struct ifmedia *ifm = &ifx->media; INIT_DBG_IF(ifp, "begin"); @@ -2227,10 +2227,10 @@ **********************************************************************/ static void -ixlv_init_multi(struct ixl_vsi *vsi) +ixlv_init_multi(struct ixl_ifx *ifx) { struct ixlv_mac_filter *f; - struct ixlv_sc *sc = vsi->back; + struct ixlv_sc *sc = ifx->back; int mcnt = 0; - IOCTL_DBG_IF(vsi->ifp, "begin"); + IOCTL_DBG_IF(ifx->ifp, "begin"); @@ -2252,11 +2252,11 @@ } static void -ixlv_add_multi(struct ixl_vsi *vsi) +ixlv_add_multi(struct ixl_ifx *ifx) { struct ifmultiaddr *ifma; - struct ifnet *ifp = vsi->ifp; - struct ixlv_sc
*sc = vsi->back; + struct ifnet *ifp = ifx->ifp; + struct ixlv_sc *sc = ifx->back; int mcnt = 0; IOCTL_DBG_IF(ifp, "begin"); @@ -2276,7 +2276,7 @@ // TODO: Remove -- cannot set promiscuous mode in a VF if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete all multicast filters */ - ixlv_init_multi(vsi); + ixlv_init_multi(ifx); sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, @@ -2309,12 +2309,12 @@ } static void -ixlv_del_multi(struct ixl_vsi *vsi) +ixlv_del_multi(struct ixl_ifx *ifx) { struct ixlv_mac_filter *f; struct ifmultiaddr *ifma; - struct ifnet *ifp = vsi->ifp; - struct ixlv_sc *sc = vsi->back; + struct ifnet *ifp = ifx->ifp; + struct ixlv_sc *sc = ifx->back; int mcnt = 0; bool match = FALSE; @@ -2372,8 +2372,8 @@ { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; device_t dev = sc->dev; int hung = 0; u32 mask, val; @@ -2405,7 +2405,7 @@ mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK | I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK); - for (int i = 0; i < vsi->num_queues; i++,que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++,que++) { /* Any queues with outstanding work get a sw irq */ if (que->busy) wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask); @@ -2417,12 +2417,12 @@ if (que->busy == IXL_QUEUE_HUNG) { ++hung; /* Mark the queue as inactive */ - vsi->active_queues &= ~((u64)1 << que->me); + ifx->active_queues &= ~((u64)1 << que->me); continue; } else { /* Check if we've come back from hung */ - if ((vsi->active_queues & ((u64)1 << que->me)) == 0) - vsi->active_queues |= ((u64)1 << que->me); + if ((ifx->active_queues & ((u64)1 << que->me)) == 0) + ifx->active_queues |= ((u64)1 << que->me); } if (que->busy >= IXL_MAX_TX_BUSY) { device_printf(dev,"Warning queue %d " @@ -2432,7 +2432,7 @@ } } /* Only reset 
when all queues show hung */ - if (hung == vsi->num_queues) + if (hung == ifx->vsi.num_queues) goto hung; callout_reset(&sc->timer, hz, ixlv_local_timer, sc); return; @@ -2451,24 +2451,23 @@ void ixlv_update_link_status(struct ixlv_sc *sc) { - struct ixl_vsi *vsi = &sc->vsi; - struct ifnet *ifp = vsi->ifp; - device_t dev = sc->dev; + struct ixl_ifx *ifx = &sc->ifx; + struct ifnet *ifp = ifx->ifp; - if (vsi->link_up){ - if (vsi->link_active == FALSE) { + if (ifx->link_up) { + if (ifx->link_active == FALSE) { if (bootverbose) - device_printf(dev,"Link is Up, %d Gbps\n", - (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10); - vsi->link_active = TRUE; + if_printf(ifp,"Link is Up, %d Gbps\n", + (ifx->link_speed == I40E_LINK_SPEED_40GB) ? 40:10); + ifx->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ - if (vsi->link_active == TRUE) { + if (ifx->link_active == TRUE) { if (bootverbose) - device_printf(dev,"Link is Down\n"); + if_printf(ifp,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); - vsi->link_active = FALSE; + ifx->link_active = FALSE; } } @@ -2488,7 +2487,7 @@ struct ifnet *ifp; int start; - ifp = sc->vsi.ifp; + ifp = sc->ifx.ifp; INIT_DBG_IF(ifp, "begin"); IXLV_CORE_LOCK_ASSERT(sc); @@ -2514,12 +2513,12 @@ * **********************************************************************/ static void -ixlv_free_queues(struct ixl_vsi *vsi) +ixlv_free_queues(struct ixl_ifx *ifx) { - struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; - struct ixl_queue *que = vsi->queues; + struct ixlv_sc *sc = (struct ixlv_sc *)ifx->back; + struct ixl_queue *que = ifx->queues; - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < ifx->vsi.num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; @@ -2542,7 +2541,7 @@ IXL_RX_LOCK_DESTROY(rxr); } - free(vsi->queues, M_DEVBUF); + free(ifx->queues, M_DEVBUF); } @@ -2555,7 +2554,7 @@ ixlv_config_rss(struct ixlv_sc *sc) { struct i40e_hw *hw 
= &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; + struct ixl_ifx *ifx = &sc->ifx; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; @@ -2570,7 +2569,7 @@ #endif /* Don't set up RSS if using a single queue */ - if (vsi->num_queues == 1) { + if (ifx->vsi.num_queues == 1) { wr32(hw, I40E_VFQF_HENA(0), 0); wr32(hw, I40E_VFQF_HENA(1), 0); ixl_flush(hw); @@ -2624,7 +2623,7 @@ /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) { - if (j == vsi->num_queues) + if (j == ifx->vsi.num_queues) j = 0; #ifdef RSS /* @@ -2656,11 +2655,11 @@ static void ixlv_setup_vlan_filters(struct ixlv_sc *sc) { - struct ixl_vsi *vsi = &sc->vsi; + struct ixl_ifx *ifx = &sc->ifx; struct ixlv_vlan_filter *f; int cnt = 0; - if (vsi->num_vlans == 0) + if (ifx->vsi.num_vlans == 0) return; /* ** Scan the filter table for vlan entries, @@ -2684,7 +2683,6 @@ ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) { struct ixlv_mac_filter *f; - device_t dev = sc->dev; /* Does one already exist? 
*/ f = ixlv_find_mac_filter(sc, macaddr); @@ -2697,7 +2695,7 @@ /* If not, get a new empty filter */ f = ixlv_get_mac_filter(sc); if (f == NULL) { - device_printf(dev, "%s: no filters available!!\n", + if_printf(sc->ifx.ifp, "%s: no filters available!!\n", __func__); return (ENOMEM); } @@ -2774,8 +2772,8 @@ ixlv_add_sysctls(struct ixlv_sc *sc) { device_t dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; - struct i40e_eth_stats *es = &vsi->eth_stats; + struct ixl_ifx *ifx = &sc->ifx; + struct i40e_eth_stats *es = &ifx->vsi.eth_stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); @@ -2787,7 +2785,7 @@ #define QUEUE_NAME_LEN 32 char queue_namebuf[QUEUE_NAME_LEN]; - struct ixl_queue *queues = vsi->queues; + struct ixl_queue *queues = ifx->queues; struct tx_ring *txr; struct rx_ring *rxr; @@ -2828,14 +2826,14 @@ struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { - SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, + SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } /* Queue sysctls */ - for (int q = 0; q < vsi->num_queues; q++) { + for (int q = 0; q < ifx->vsi.num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue Name"); @@ -2844,34 +2842,34 @@ txr = &(queues[q].txr); rxr = &(queues[q].rxr); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &(queues[q].dropped_pkts), "Driver dropped packets"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); 
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", CTLFLAG_RD, &(queues[q].tx_dma_setup), "Driver tx dma failure in xmit"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); @@ -2935,7 +2933,7 @@ que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; - val = rd32(que->vsi->hw, que->txr.tail); + val = rd32(que->ifx->hw, que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; @@ -2957,7 +2955,7 @@ que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; - val = rd32(que->vsi->hw, que->rxr.tail); + val = rd32(que->ifx->hw, que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; Index: sys/dev/ixl/ixl.h =================================================================== --- sys/dev/ixl/ixl.h +++ sys/dev/ixl/ixl.h @@ -420,7 +420,7 @@ ** for the associated tx and rx ring pair. 
*/ struct ixl_queue { - struct ixl_vsi *vsi; + struct ixl_ifx *ifx; u32 me; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ @@ -445,13 +445,40 @@ u64 dropped_pkts; }; +SLIST_HEAD(i40e_ftl_head, ixl_mac_filter); +struct ixl_vsi { + void *back; + uint16_t seid; + uint16_t vsi_num; + uint16_t first_queue; + uint16_t num_queues; + + /* MAC/VLAN Filter list */ + struct i40e_ftl_head ftl; + u16 num_vlans; + u16 num_macs; + + /* Driver statistics */ + u64 hw_filters_del; + u64 hw_filters_add; + + struct i40e_aqc_vsi_properties_data info; + + struct sysctl_oid *vsi_node; + + /* Per-VSI stats */ + struct i40e_eth_stats eth_stats; + struct i40e_eth_stats eth_stats_offsets; + bool stat_offsets_loaded; +}; + /* ** Virtual Station interface: ** there would be one of these per traffic class/type ** for now just one, and its embedded in the pf */ SLIST_HEAD(ixl_ftl_head, ixl_mac_filter); -struct ixl_vsi { +struct ixl_ifx { void *back; struct ifnet *ifp; struct device *dev; @@ -460,30 +487,25 @@ u64 que_mask; int id; u16 msix_base; /* station base MSIX vector */ - u16 num_queues; u16 rx_itr_setting; u16 tx_itr_setting; struct ixl_queue *queues; /* head of queues */ bool link_active; - u16 seid; + u16 uplink_seid; + u16 downlink_seid; u16 max_frame_size; u32 link_speed; bool link_up; u32 fc; /* local flow ctrl setting */ + struct ixl_vsi vsi; + /* MAC/VLAN Filter list */ struct ixl_ftl_head ftl; - struct i40e_aqc_vsi_properties_data info; - eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; - u16 num_vlans; - /* Per-VSI stats from hardware */ - struct i40e_eth_stats eth_stats; - struct i40e_eth_stats eth_stats_offsets; - bool stat_offsets_loaded; /* VSI stat counters */ u64 ipackets; u64 ierrors; @@ -497,10 +519,6 @@ u64 oqdrops; u64 noproto; - /* Driver statistics */ - u64 hw_filters_del; - u64 hw_filters_add; - /* Misc. 
*/ u64 active_queues; u64 flags; @@ -598,7 +616,7 @@ int ixl_mq_start_locked(struct ifnet *, struct tx_ring *); void ixl_deferred_mq_start(void *, int); void ixl_qflush(struct ifnet *); -void ixl_free_vsi(struct ixl_vsi *); +void ixl_free_ifx(struct ixl_ifx *); void ixl_free_que_tx(struct ixl_queue *); void ixl_free_que_rx(struct ixl_queue *); #ifdef IXL_FDIR Index: sys/dev/ixl/ixl_pf.h =================================================================== --- sys/dev/ixl/ixl_pf.h +++ sys/dev/ixl/ixl_pf.h @@ -67,13 +67,14 @@ int advertised_speed; /* - ** VSI - Stations: + ** Network interfaces ** These are the traffic class holders, and ** will have a stack interface and queues ** associated with them. - ** NOTE: for now using just one, so embed it. + ** NOTE: The PF has only a single interface, + ** so it is embedded in the PF struct. */ - struct ixl_vsi vsi; + struct ixl_ifx ifx; /* Misc stats maintained by the driver */ u64 watchdog_events; Index: sys/dev/ixl/ixl_txrx.c =================================================================== --- sys/dev/ixl/ixl_txrx.c +++ sys/dev/ixl/ixl_txrx.c @@ -62,7 +62,7 @@ int ixl_mq_start(struct ifnet *ifp, struct mbuf *m) { - struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_ifx *ifx = ifp->if_softc; struct ixl_queue *que; struct tx_ring *txr; int err, i; @@ -85,18 +85,18 @@ - i = bucket_id % vsi->num_queues; + i = bucket_id % ifx->vsi.num_queues; } else #endif - i = m->m_pkthdr.flowid % vsi->num_queues; + i = m->m_pkthdr.flowid % ifx->vsi.num_queues; } else - i = curcpu % vsi->num_queues; + i = curcpu % ifx->vsi.num_queues; /* ** This may not be perfect, but until something ** better comes along it will keep from scheduling ** on stalled queues.
*/ - if (((1 << i) & vsi->active_queues) == 0) - i = ffsl(vsi->active_queues); + if (((1 << i) & ifx->active_queues) == 0) + i = ffsl(ifx->active_queues); - que = &vsi->queues[i]; + que = &ifx->queues[i]; txr = &que->txr; err = drbr_enqueue(ifp, txr->br, m); @@ -115,13 +115,13 @@ ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct ixl_queue *que = txr->que; - struct ixl_vsi *vsi = que->vsi; + struct ixl_ifx *ifx = que->ifx; struct mbuf *next; int err = 0; if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || - vsi->link_active == 0) + ifx->link_active == 0) return (ENETDOWN); /* Process the transmit queue */ @@ -154,8 +154,8 @@ { struct ixl_queue *que = arg; struct tx_ring *txr = &que->txr; - struct ixl_vsi *vsi = que->vsi; - struct ifnet *ifp = vsi->ifp; + struct ixl_ifx *ifx = que->ifx; + struct ifnet *ifp = ifx->ifp; IXL_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) @@ -169,10 +169,10 @@ void ixl_qflush(struct ifnet *ifp) { - struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_ifx *ifx = ifp->if_softc; - for (int i = 0; i < vsi->num_queues; i++) { - struct ixl_queue *que = &vsi->queues[i]; + for (int i = 0; i < ifx->vsi.num_queues; i++) { + struct ixl_queue *que = &ifx->queues[i]; struct tx_ring *txr = &que->txr; struct mbuf *m; IXL_TX_LOCK(txr); @@ -223,8 +223,8 @@ static int ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) { - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; + struct ixl_ifx *ifx = que->ifx; + struct i40e_hw *hw = ifx->hw; struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; struct i40e_tx_desc *txd = NULL; @@ -403,8 +403,8 @@ ixl_allocate_tx_data(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; - struct ixl_vsi *vsi = que->vsi; - device_t dev = vsi->dev; + struct ixl_ifx *ifx = que->ifx; + device_t dev = ifx->dev; struct ixl_tx_buf *buf; int error = 0; @@ -813,10 +813,8 @@ struct ixl_tx_buf *buf; struct i40e_tx_desc *tx_desc, *eop_desc; - mtx_assert(&txr->mtx, MA_OWNED); - /* These are not the 
descriptors you seek, move along :) */ if (txr->avail == que->num_desc) { que->busy = 0; @@ -935,7 +933,7 @@ static void ixl_refresh_mbufs(struct ixl_queue *que, int limit) { - struct ixl_vsi *vsi = que->vsi; + struct ixl_ifx *ifx = que->ifx; struct rx_ring *rxr = &que->rxr; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; @@ -1017,7 +1015,7 @@ } update: if (refreshed) /* Update hardware tail index */ - wr32(vsi->hw, rxr->tail, rxr->next_refresh); + wr32(ifx->hw, rxr->tail, rxr->next_refresh); return; } @@ -1034,8 +1032,8 @@ ixl_allocate_rx_data(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; - struct ixl_vsi *vsi = que->vsi; - device_t dev = vsi->dev; + struct ixl_ifx *ifx = que->ifx; + device_t dev = ifx->dev; struct ixl_rx_buf *buf; int i, bsize, error; @@ -1109,9 +1107,9 @@ ixl_init_rx_ring(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; - struct ixl_vsi *vsi = que->vsi; + struct ixl_ifx *ifx = que->ifx; #if defined(INET6) || defined(INET) - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp = ifx->ifp; struct lro_ctrl *lro = &rxr->lro; #endif struct ixl_rx_buf *buf; @@ -1212,8 +1210,8 @@ rxr->bytes = 0; rxr->discard = FALSE; - wr32(vsi->hw, rxr->tail, que->num_desc - 1); - ixl_flush(vsi->hw); + wr32(ifx->hw, rxr->tail, que->num_desc - 1); + ixl_flush(ifx->hw); #if defined(INET6) || defined(INET) /* @@ -1227,7 +1225,7 @@ } INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me); rxr->lro_enabled = TRUE; - lro->ifp = vsi->ifp; + lro->ifp = ifx->ifp; } #endif @@ -1437,9 +1435,9 @@ bool ixl_rxeof(struct ixl_queue *que, int count) { - struct ixl_vsi *vsi = que->vsi; + struct ixl_ifx *ifx = que->ifx; struct rx_ring *rxr = &que->rxr; - struct ifnet *ifp = vsi->ifp; + struct ifnet *ifp = ifx->ifp; #if defined(INET6) || defined(INET) struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; @@ -1718,36 +1716,36 @@ uint64_t ixl_get_counter(if_t ifp, ift_counter cnt) { - struct ixl_vsi *vsi; + struct ixl_ifx *ifx; - vsi = 
if_getsoftc(ifp); + ifx = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: - return (vsi->ipackets); + return (ifx->ipackets); case IFCOUNTER_IERRORS: - return (vsi->ierrors); + return (ifx->ierrors); case IFCOUNTER_OPACKETS: - return (vsi->opackets); + return (ifx->opackets); case IFCOUNTER_OERRORS: - return (vsi->oerrors); + return (ifx->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: - return (vsi->ibytes); + return (ifx->ibytes); case IFCOUNTER_OBYTES: - return (vsi->obytes); + return (ifx->obytes); case IFCOUNTER_IMCASTS: - return (vsi->imcasts); + return (ifx->imcasts); case IFCOUNTER_OMCASTS: - return (vsi->omcasts); + return (ifx->omcasts); case IFCOUNTER_IQDROPS: - return (vsi->iqdrops); + return (ifx->iqdrops); case IFCOUNTER_OQDROPS: - return (vsi->oqdrops); + return (ifx->oqdrops); case IFCOUNTER_NOPROTO: - return (vsi->noproto); + return (ifx->noproto); default: return (if_get_counter_default(ifp, cnt)); } Index: sys/dev/ixl/ixlv.h =================================================================== --- sys/dev/ixl/ixlv.h +++ sys/dev/ixl/ixlv.h @@ -128,7 +128,7 @@ struct task aq_sched; struct taskqueue *tq; - struct ixl_vsi vsi; + struct ixl_ifx ifx; /* Filter lists */ struct mac_list *mac_filters; @@ -191,8 +191,8 @@ void ixlv_enable_queues(struct ixlv_sc *); void ixlv_disable_queues(struct ixlv_sc *); void ixlv_map_queues(struct ixlv_sc *); -void ixlv_enable_intr(struct ixl_vsi *); -void ixlv_disable_intr(struct ixl_vsi *); +void ixlv_enable_intr(struct ixl_ifx *); +void ixlv_disable_intr(struct ixl_ifx *); void ixlv_add_ether_filters(struct ixlv_sc *); void ixlv_del_ether_filters(struct ixlv_sc *); void ixlv_request_stats(struct ixlv_sc *); Index: sys/dev/ixl/ixlvc.c =================================================================== --- sys/dev/ixl/ixlvc.c +++ sys/dev/ixl/ixlvc.c @@ -354,16 +354,16 @@ ixlv_configure_queues(struct ixlv_sc *sc) { device_t 
dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; struct tx_ring *txr; struct rx_ring *rxr; int len, pairs; struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - - pairs = vsi->num_queues; + + pairs = ifx->vsi.num_queues; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); @@ -394,7 +394,7 @@ vqpi->rxq.queue_id = i; vqpi->rxq.ring_len = que->num_desc; vqpi->rxq.dma_ring_addr = rxr->dma.pa; - vqpi->rxq.max_pkt_size = vsi->max_frame_size; + vqpi->rxq.max_pkt_size = ifx->max_frame_size; vqpi->rxq.databuffer_size = rxr->mbuf_sz; vqpi->rxq.splithdr_enabled = 0; } @@ -449,8 +449,8 @@ { struct i40e_virtchnl_irq_map_info *vm; int i, q, len; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_ifx *ifx = &sc->ifx; + struct ixl_queue *que = ifx->queues; /* How many queue vectors, adminq uses one */ q = sc->msix - 1; @@ -663,7 +663,7 @@ ixl_vc_schedule_retry(&sc->vc_mgr); return; } - a->vsi_id = sc->vsi.id; + a->vsi_id = sc->ifx.id; a->num_elements = cnt; /* Scan the filter array */ @@ -723,7 +723,7 @@ ixl_vc_schedule_retry(&sc->vc_mgr); return; } - d->vsi_id = sc->vsi.id; + d->vsi_id = sc->ifx.id; d->num_elements = cnt; /* Scan the filter array */ @@ -783,35 +783,35 @@ void ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) { - struct ixl_vsi *vsi; + struct ixl_ifx *ifx; uint64_t tx_discards; int i; - vsi = &sc->vsi; + ifx = &sc->ifx; tx_discards = es->tx_discards; - for (i = 0; i < sc->vsi.num_queues; i++) - tx_discards += sc->vsi.queues[i].txr.br->br_drops; + for (i = 0; i < sc->ifx.vsi.num_queues; i++) + tx_discards += sc->ifx.queues[i].txr.br->br_drops; /* Update ifnet stats */ - IXL_SET_IPACKETS(vsi, es->rx_unicast + + IXL_SET_IPACKETS(ifx, es->rx_unicast 
+ es->rx_multicast + es->rx_broadcast); - IXL_SET_OPACKETS(vsi, es->tx_unicast + + IXL_SET_OPACKETS(ifx, es->tx_unicast + es->tx_multicast + es->tx_broadcast); - IXL_SET_IBYTES(vsi, es->rx_bytes); - IXL_SET_OBYTES(vsi, es->tx_bytes); - IXL_SET_IMCASTS(vsi, es->rx_multicast); - IXL_SET_OMCASTS(vsi, es->tx_multicast); - - IXL_SET_OERRORS(vsi, es->tx_errors); - IXL_SET_IQDROPS(vsi, es->rx_discards); - IXL_SET_OQDROPS(vsi, tx_discards); - IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); - IXL_SET_COLLISIONS(vsi, 0); - - sc->vsi.eth_stats = *es; + IXL_SET_IBYTES(ifx, es->rx_bytes); + IXL_SET_OBYTES(ifx, es->tx_bytes); + IXL_SET_IMCASTS(ifx, es->rx_multicast); + IXL_SET_OMCASTS(ifx, es->tx_multicast); + + IXL_SET_OERRORS(ifx, es->tx_errors); + IXL_SET_IQDROPS(ifx, es->rx_discards); + IXL_SET_OQDROPS(ifx, tx_discards); + IXL_SET_NOPROTO(ifx, es->rx_unknown_protocol); + IXL_SET_COLLISIONS(ifx, 0); + + sc->ifx.vsi.eth_stats = *es; } /* @@ -827,7 +827,7 @@ i40e_status v_retval, u8 *msg, u16 msglen) { device_t dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; + struct ixl_ifx *ifx = &sc->ifx; if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { struct i40e_virtchnl_pf_event *vpe = @@ -840,9 +840,9 @@ vpe->event_data.link_event.link_status, vpe->event_data.link_event.link_speed); #endif - vsi->link_up = + ifx->link_up = vpe->event_data.link_event.link_status; - vsi->link_speed = + ifx->link_speed = vpe->event_data.link_event.link_speed; ixlv_update_link_status(sc); break; @@ -907,10 +907,10 @@ /* Update link status */ ixlv_update_link_status(sc); /* Turn on all interrupts */ - ixlv_enable_intr(vsi); + ixlv_enable_intr(ifx); /* And inform the stack we're ready */ - vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; - vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifx->ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifx->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: @@ -918,9 +918,9 @@ v_retval); if (v_retval == 0) { /* Turn off all interrupts */ - 
ixlv_disable_intr(vsi); + ixlv_disable_intr(ifx); /* Tell the stack that the interface is no longer active */ - vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + ifx->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } break; case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: