Index: head/sys/dev/ex/if_ex.c =================================================================== --- head/sys/dev/ex/if_ex.c (revision 331828) +++ head/sys/dev/ex/if_ex.c (revision 331829) @@ -1,1083 +1,1077 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1996, Javier Martín Rueda (jmrueda@diatel.upm.es) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * MAINTAINER: Matthew N. Dodd * */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/10, Pro/10+ Ethernet driver * * Revision history: * * dd-mmm-yyyy: Multicast support ported from NetBSD's if_iy driver. * 30-Oct-1996: first beta version. Inet and BPF supported, but no multicast. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXDEBUG # define Start_End 1 # define Rcvd_Pkts 2 # define Sent_Pkts 4 # define Status 8 static int debug_mask = 0; # define DODEBUG(level, action) if (level & debug_mask) action #else # define DODEBUG(level, action) #endif devclass_t ex_devclass; char irq2eemap[] = { -1, -1, 0, 1, -1, 2, -1, -1, -1, 0, 3, 4, -1, -1, -1, -1 }; u_char ee2irqmap[] = { 9, 3, 5, 10, 11, 0, 0, 0 }; char plus_irq2eemap[] = { -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, -1, -1, -1 }; u_char plus_ee2irqmap[] = { 3, 4, 5, 7, 9, 10, 11, 12 }; /* Network Interface Functions */ static void ex_init(void *); static void ex_init_locked(struct ex_softc *); static void ex_start(struct ifnet *); static void ex_start_locked(struct ifnet *); static int ex_ioctl(struct ifnet *, u_long, caddr_t); static void ex_watchdog(void *); /* ifmedia Functions */ static int ex_ifmedia_upd(struct ifnet *); static void ex_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int ex_get_media(struct ex_softc *); static void ex_reset(struct ex_softc *); static void ex_setmulti(struct ex_softc *); static void ex_tx_intr(struct ex_softc *); static void ex_rx_intr(struct ex_softc *); void ex_get_address(struct ex_softc *sc, u_char *enaddr) { uint16_t eaddr_tmp; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Lo); enaddr[5] = eaddr_tmp & 0xff; enaddr[4] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Mid); enaddr[3] = eaddr_tmp & 0xff; enaddr[2] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Hi); enaddr[1] = eaddr_tmp & 0xff; enaddr[0] = eaddr_tmp >> 8; return; } int ex_card_type(u_char *enaddr) { if ((enaddr[0] == 0x00) && (enaddr[1] == 0xA0) && (enaddr[2] == 0xC9)) return (CARD_TYPE_EX_10_PLUS); return (CARD_TYPE_EX_10); } /* * Caller is responsible for eventually calling * ex_release_resources() on failure. */ int ex_alloc_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); int error = 0; sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->ioport_rid, RF_ACTIVE); if (!sc->ioport) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq) { device_printf(dev, "No IRQ?!\n"); error = ENOMEM; goto bad; } bad: return (error); } void ex_release_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); if (sc->ih) { bus_teardown_intr(dev, sc->irq, sc->ih); sc->ih = NULL; } if (sc->ioport) { bus_release_resource(dev, SYS_RES_IOPORT, sc->ioport_rid, sc->ioport); sc->ioport = NULL; } if (sc->irq) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; } if (sc->ifp) if_free(sc->ifp); return; } int ex_attach(device_t dev) { struct ex_softc * sc = device_get_softc(dev); struct ifnet * ifp; struct ifmedia * ifm; int error; uint16_t temp; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } /* work out which set of irq <-> internal tables to use */ if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) { sc->irq2ee = plus_irq2eemap; sc->ee2irq = plus_ee2irqmap; } else { sc->irq2ee = irq2eemap; sc->ee2irq = ee2irqmap; } sc->mem_size = CARD_RAM_SIZE; /* XXX This should be read from the card itself. */ /* * Initialize the ifnet structure. 
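 * Supported media types are taken from EEPROM word 5 (EE_W5): the TPE,
 * BNC and AUI port bits determine which ifmedia entries are added below.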
*/ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = ex_start; ifp->if_ioctl = ex_ioctl; ifp->if_init = ex_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts); mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); temp = ex_eeprom_read(sc, EE_W5); if (temp & EE_W5_PORT_TPE) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); if (temp & EE_W5_PORT_BNC) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); if (temp & EE_W5_PORT_AUI) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL); ifmedia_set(&sc->ifmedia, ex_get_media(sc)); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; ex_ifmedia_upd(ifp); /* * Attach the interface. */ ether_ifattach(ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ex_intr, (void *)sc, &sc->ih); if (error) { device_printf(dev, "bus_setup_intr() failed!\n"); ether_ifdetach(ifp); mtx_destroy(&sc->lock); return (error); } return(0); } int ex_detach(device_t dev) { struct ex_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; EX_LOCK(sc); ex_stop(sc); EX_UNLOCK(sc); ether_ifdetach(ifp); callout_drain(&sc->timer); ex_release_resources(dev); mtx_destroy(&sc->lock); return (0); } static void ex_init(void *xsc) { struct ex_softc * sc = (struct ex_softc *) xsc; EX_LOCK(sc); ex_init_locked(sc); EX_UNLOCK(sc); } static void ex_init_locked(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int i; unsigned short temp_reg; DODEBUG(Start_End, printf("%s: ex_init: start\n", ifp->if_xname);); sc->tx_timeout = 0; /* * Load the ethernet address into the card. */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); temp_reg = CSR_READ_1(sc, EEPROM_REG); if (temp_reg & Trnoff_Enable) CSR_WRITE_1(sc, EEPROM_REG, temp_reg & ~Trnoff_Enable); for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, I_ADDR_REG0 + i, IF_LLADDR(sc->ifp)[i]); /* * - Setup transmit chaining and discard bad received frames. * - Match broadcast. * - Clear test mode. * - Set receiving mode. */ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | Tx_Chn_Int_Md | Tx_Chn_ErStp | Disc_Bad_Fr); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | No_SA_Ins | RX_CRC_InMem); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3) & 0x3f /* XXX constants. */ ); /* * - Set IRQ number, if this part has it. ISA devices have this, * while PC Card devices don't seem to. Either way, we have to * switch to Bank1 as the rest of this code relies on that. */ CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); if (sc->flags & HAS_INT_NO_REG) CSR_WRITE_1(sc, INT_NO_REG, (CSR_READ_1(sc, INT_NO_REG) & 0xf8) | sc->irq2ee[sc->irq_no]); /* * Divide the available memory in the card into rcv and xmt buffers. * By default, I use the first 3/4 of the memory for the rcv buffer, * and the remaining 1/4 of the memory for the xmt buffer. 
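 * The limit registers written below hold only the high byte of each
 * boundary (note the ">> 8"), so the rx/tx limits are effectively
 * expressed in 256-byte pages.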
*/ sc->rx_mem_size = sc->mem_size * 3 / 4; sc->tx_mem_size = sc->mem_size - sc->rx_mem_size; sc->rx_lower_limit = 0x0000; sc->rx_upper_limit = sc->rx_mem_size - 2; sc->tx_lower_limit = sc->rx_mem_size; sc->tx_upper_limit = sc->mem_size - 2; CSR_WRITE_1(sc, RCV_LOWER_LIMIT_REG, sc->rx_lower_limit >> 8); CSR_WRITE_1(sc, RCV_UPPER_LIMIT_REG, sc->rx_upper_limit >> 8); CSR_WRITE_1(sc, XMT_LOWER_LIMIT_REG, sc->tx_lower_limit >> 8); CSR_WRITE_1(sc, XMT_UPPER_LIMIT_REG, sc->tx_upper_limit >> 8); /* * Enable receive and transmit interrupts, and clear any pending int. */ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); CSR_WRITE_1(sc, STATUS_REG, All_Int); /* * Initialize receive and transmit ring buffers. */ CSR_WRITE_2(sc, RCV_BAR, sc->rx_lower_limit); sc->rx_head = sc->rx_lower_limit; CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit | 0xfe); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE init\n");); callout_reset(&sc->timer, hz, ex_watchdog, sc); ex_setmulti(sc); /* * Final reset of the board, and enable operation. */ CSR_WRITE_1(sc, CMD_REG, Sel_Reset_CMD); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Rcv_Enable_CMD); ex_start_locked(ifp); DODEBUG(Start_End, printf("%s: ex_init: finish\n", ifp->if_xname);); } static void ex_start(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; EX_LOCK(sc); ex_start_locked(ifp); EX_UNLOCK(sc); } static void ex_start_locked(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; int i, len, data_len, avail, dest, next; unsigned char tmp16[2]; struct mbuf * opkt; struct mbuf * m; DODEBUG(Start_End, printf("ex_start%d: start\n", unit);); /* * Main loop: send outgoing packets to network card until there are no * more packets left, or the card cannot accept any more yet. */ while (((opkt = ifp->if_snd.ifq_head) != NULL) && !(ifp->if_drv_flags & IFF_DRV_OACTIVE)) { /* * Ensure there is enough free transmit buffer space for * this packet, including its header. Note: the header * cannot wrap around the end of the transmit buffer and * must be kept together, so we allow space for twice the * length of the header, just in case. */ for (len = 0, m = opkt; m != NULL; m = m->m_next) { len += m->m_len; } data_len = len; DODEBUG(Sent_Pkts, printf("1. Sending packet with %d data bytes. ", data_len);); if (len & 1) { len += XMT_HEADER_LEN + 1; } else { len += XMT_HEADER_LEN; } if ((i = sc->tx_tail - sc->tx_head) >= 0) { avail = sc->tx_mem_size - i; } else { avail = -i; } DODEBUG(Sent_Pkts, printf("i=%d, avail=%d\n", i, avail);); if (avail >= len + XMT_HEADER_LEN) { IF_DEQUEUE(&ifp->if_snd, opkt); #ifdef EX_PSA_INTR /* * Disable rx and tx interrupts, to avoid corruption * of the host address register by interrupt service * routines. * XXX Is this necessary with splimp() enabled? */ CSR_WRITE_1(sc, MASK_REG, All_Int); #endif /* * Compute the start and end addresses of this * frame in the tx buffer. */ dest = sc->tx_tail; next = dest + len; if (next > sc->tx_upper_limit) { if ((sc->tx_upper_limit + 2 - sc->tx_tail) <= XMT_HEADER_LEN) { dest = sc->tx_lower_limit; next = dest + len; } else { next = sc->tx_lower_limit + next - sc->tx_upper_limit - 2; } } /* * Build the packet frame in the card's ring buffer. */ DODEBUG(Sent_Pkts, printf("2. dest=%d, next=%d. 
", dest, next);); CSR_WRITE_2(sc, HOST_ADDR_REG, dest); CSR_WRITE_2(sc, IO_PORT_REG, Transmit_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, next); CSR_WRITE_2(sc, IO_PORT_REG, data_len); /* * Output the packet data to the card. Ensure all * transfers are 16-bit wide, even if individual * mbufs have odd length. */ for (m = opkt, i = 0; m != NULL; m = m->m_next) { DODEBUG(Sent_Pkts, printf("[%d]", m->m_len);); if (i) { tmp16[1] = *(mtod(m, caddr_t)); CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); } CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) (mtod(m, caddr_t) + i), (m->m_len - i) / 2); if ((i = (m->m_len - i) & 1) != 0) { tmp16[0] = *(mtod(m, caddr_t) + m->m_len - 1); } } if (i) CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); /* * If there were other frames chained, update the * chain in the last one. */ if (sc->tx_head != sc->tx_tail) { if (sc->tx_tail != dest) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Chain_Point); CSR_WRITE_2(sc, IO_PORT_REG, dest); } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); i = CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); CSR_WRITE_2(sc, IO_PORT_REG, i | Ch_bit); } /* * Resume normal operation of the card: * - Make a dummy read to flush the DRAM write * pipeline. * - Enable receive and transmit interrupts. * - Send Transmit or Resume_XMT command, as * appropriate. */ CSR_READ_2(sc, IO_PORT_REG); #ifdef EX_PSA_INTR CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); #endif if (sc->tx_head == sc->tx_tail) { CSR_WRITE_2(sc, XMT_BAR, dest); CSR_WRITE_1(sc, CMD_REG, Transmit_CMD); sc->tx_head = dest; DODEBUG(Sent_Pkts, printf("Transmit\n");); } else { CSR_WRITE_1(sc, CMD_REG, Resume_XMT_List_CMD); DODEBUG(Sent_Pkts, printf("Resume\n");); } sc->tx_last = dest; sc->tx_tail = next; BPF_MTAP(ifp, opkt); sc->tx_timeout = 2; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(opkt); } else { ifp->if_drv_flags |= IFF_DRV_OACTIVE; DODEBUG(Status, printf("OACTIVE start\n");); } } DODEBUG(Start_End, printf("ex_start%d: finish\n", unit);); } void ex_stop(struct ex_softc *sc) { DODEBUG(Start_End, printf("ex_stop%d: start\n", unit);); EX_ASSERT_LOCKED(sc); /* * Disable card operation: * - Disable the interrupt line. * - Flush transmission and disable reception. * - Mask and clear all interrupts. * - Reset the 82595. */ CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) & ~TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, CMD_REG, Rcv_Stop); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; sc->tx_last = 0; /* XXX I think these two lines are not necessary, because ex_init will always be called again to reinit the interface. 
*/ CSR_WRITE_1(sc, MASK_REG, All_Int); CSR_WRITE_1(sc, STATUS_REG, All_Int); CSR_WRITE_1(sc, CMD_REG, Reset_CMD); DELAY(200); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->tx_timeout = 0; callout_stop(&sc->timer); DODEBUG(Start_End, printf("ex_stop%d: finish\n", unit);); return; } void ex_intr(void *arg) { struct ex_softc *sc = (struct ex_softc *)arg; struct ifnet *ifp = sc->ifp; int int_status, send_pkts; int loops = 100; DODEBUG(Start_End, printf("ex_intr%d: start\n", unit);); EX_LOCK(sc); send_pkts = 0; while (loops-- > 0 && (int_status = CSR_READ_1(sc, STATUS_REG)) & (Tx_Int | Rx_Int)) { /* don't loop forever */ if (int_status == 0xff) break; if (int_status & Rx_Int) { CSR_WRITE_1(sc, STATUS_REG, Rx_Int); ex_rx_intr(sc); } else if (int_status & Tx_Int) { CSR_WRITE_1(sc, STATUS_REG, Tx_Int); ex_tx_intr(sc); send_pkts = 1; } } if (loops == 0) printf("100 loops are not enough\n"); /* * If any packet has been transmitted, and there are queued packets to * be sent, attempt to send more packets to the network card. */ if (send_pkts && (ifp->if_snd.ifq_head != NULL)) ex_start_locked(ifp); EX_UNLOCK(sc); DODEBUG(Start_End, printf("ex_intr%d: finish\n", unit);); return; } static void ex_tx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int tx_status; DODEBUG(Start_End, printf("ex_tx_intr%d: start\n", unit);); /* * - Cancel the watchdog. * For all packets transmitted since last transmit interrupt: * - Advance chain pointer to next queued packet. * - Update statistics. */ sc->tx_timeout = 0; while (sc->tx_head != sc->tx_tail) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_head); if (!(CSR_READ_2(sc, IO_PORT_REG) & Done_bit)) break; tx_status = CSR_READ_2(sc, IO_PORT_REG); sc->tx_head = CSR_READ_2(sc, IO_PORT_REG); if (tx_status & TX_OK_bit) { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, tx_status & No_Collisions_bits); } /* * The card should be ready to accept more packets now. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE tx_intr\n");); DODEBUG(Start_End, printf("ex_tx_intr%d: finish\n", unit);); return; } static void ex_rx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int rx_status; int pkt_len; int QQQ; struct mbuf * m; struct mbuf * ipkt; struct ether_header * eh; DODEBUG(Start_End, printf("ex_rx_intr%d: start\n", unit);); /* * For all packets received since last receive interrupt: * - If packet ok, read it into a new mbuf and queue it to interface, * updating statistics. * - If packet bad, just discard it, and update statistics. * Finally, advance receive stop limit in card's memory to new location. */ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); while (CSR_READ_2(sc, IO_PORT_REG) == RCV_Done) { rx_status = CSR_READ_2(sc, IO_PORT_REG); sc->rx_head = CSR_READ_2(sc, IO_PORT_REG); QQQ = pkt_len = CSR_READ_2(sc, IO_PORT_REG); if (rx_status & RCV_OK_bit) { MGETHDR(m, M_NOWAIT, MT_DATA); ipkt = m; if (ipkt == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); } else { ipkt->m_pkthdr.rcvif = ifp; ipkt->m_pkthdr.len = pkt_len; ipkt->m_len = MHLEN; while (pkt_len > 0) { if (pkt_len >= MINCLSIZE) { if (MCLGET(m, M_NOWAIT)) { m->m_len = MCLBYTES; } else { m_freem(ipkt); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); goto rx_another; } } m->m_len = min(m->m_len, pkt_len); /* * NOTE: I'm assuming that all mbufs allocated are of even length, * except for the last one in an odd-length packet. 
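 * Each mbuf is filled with 16-bit reads from IO_PORT_REG; a trailing odd
 * byte, if any, is picked up with a single 8-bit read afterwards.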
*/ CSR_READ_MULTI_2(sc, IO_PORT_REG, mtod(m, uint16_t *), m->m_len / 2); if (m->m_len & 1) { *(mtod(m, caddr_t) + m->m_len - 1) = CSR_READ_1(sc, IO_PORT_REG); } pkt_len -= m->m_len; if (pkt_len > 0) { MGET(m->m_next, M_NOWAIT, MT_DATA); if (m->m_next == NULL) { m_freem(ipkt); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); goto rx_another; } m = m->m_next; m->m_len = MLEN; } } eh = mtod(ipkt, struct ether_header *); #ifdef EXDEBUG if (debug_mask & Rcvd_Pkts) { if ((eh->ether_dhost[5] != 0xff) || (eh->ether_dhost[0] != 0xff)) { printf("Receive packet with %d data bytes: %6D -> ", QQQ, eh->ether_shost, ":"); printf("%6D\n", eh->ether_dhost, ":"); } /* QQQ */ } #endif EX_UNLOCK(sc); (*ifp->if_input)(ifp, ipkt); EX_LOCK(sc); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); } } else { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); rx_another: ; } if (sc->rx_head < sc->rx_lower_limit + 2) CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit); else CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_head - 2); DODEBUG(Start_End, printf("ex_rx_intr%d: finish\n", unit);); return; } static int ex_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ex_softc * sc = ifp->if_softc; struct ifreq * ifr = (struct ifreq *)data; int error = 0; DODEBUG(Start_End, printf("%s: ex_ioctl: start ", ifp->if_xname);); switch(cmd) { - case SIOCSIFADDR: - case SIOCGIFADDR: - case SIOCSIFMTU: - error = ether_ioctl(ifp, cmd, data); - break; - case SIOCSIFFLAGS: DODEBUG(Start_End, printf("SIOCSIFFLAGS");); EX_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ex_stop(sc); } else { ex_init_locked(sc); } EX_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ex_init(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, cmd); break; default: - DODEBUG(Start_End, printf("unknown");); - error = EINVAL; + error = ether_ioctl(ifp, cmd, data); + break; } DODEBUG(Start_End, printf("\n%s: ex_ioctl: finish\n", ifp->if_xname);); return(error); } static void ex_setmulti(struct ex_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *maddr; uint16_t *addr; int count; int timeout, status; ifp = sc->ifp; count = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; count++; } if_maddr_runlock(ifp); if ((ifp->if_flags & IFF_PROMISC) || (ifp->if_flags & IFF_ALLMULTI) || count > 63) { /* Interface is in promiscuous mode or there are too many * multicast addresses for the card to handle */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Promisc_Mode); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } else if ((ifp->if_flags & IFF_MULTICAST) && (count > 0)) { /* Program multicast addresses plus our MAC address * into the filter */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Multi_IA); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); /* Borrow space from TX buffer; this should be safe * as this is only called from ex_init */ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_lower_limit); CSR_WRITE_2(sc, IO_PORT_REG, MC_Setup_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, (count + 1) * 6); if_maddr_rlock(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; addr = (uint16_t*)LLADDR((struct sockaddr_dl *) 
maddr->ifma_addr); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); } if_maddr_runlock(ifp); /* Program our MAC address as well */ /* XXX: Is this necessary? The Linux driver does this * but the NetBSD driver does not */ addr = (uint16_t*)IF_LLADDR(sc->ifp); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); CSR_WRITE_1(sc, CMD_REG, MC_Setup_CMD); sc->tx_head = sc->tx_lower_limit; sc->tx_tail = sc->tx_head + XMT_HEADER_LEN + (count + 1) * 6; for (timeout=0; timeout<100; timeout++) { DELAY(2); if ((CSR_READ_1(sc, STATUS_REG) & Exec_Int) == 0) continue; status = CSR_READ_1(sc, CMD_REG); CSR_WRITE_1(sc, STATUS_REG, Exec_Int); break; } sc->tx_head = sc->tx_tail; } else { /* No multicast or promiscuous mode */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) & 0xDE); /* ~(Multi_IA | Promisc_Mode) */ CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } } static void ex_reset(struct ex_softc *sc) { DODEBUG(Start_End, printf("ex_reset%d: start\n", unit);); EX_ASSERT_LOCKED(sc); ex_stop(sc); ex_init_locked(sc); DODEBUG(Start_End, printf("ex_reset%d: finish\n", unit);); return; } static void ex_watchdog(void *arg) { struct ex_softc * sc = arg; struct ifnet *ifp = sc->ifp; if (sc->tx_timeout && --sc->tx_timeout == 0) { DODEBUG(Start_End, if_printf(ifp, "ex_watchdog: start\n");); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; DODEBUG(Status, printf("OIDLE watchdog\n");); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ex_reset(sc); ex_start_locked(ifp); DODEBUG(Start_End, if_printf(ifp, "ex_watchdog: finish\n");); } callout_reset(&sc->timer, hz, ex_watchdog, sc); } static int ex_get_media(struct ex_softc *sc) { int current; int media; media = ex_eeprom_read(sc, EE_W5); CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); current = CSR_READ_1(sc, REG3); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); if ((current & TPE_bit) && (media & EE_W5_PORT_TPE)) return(IFM_ETHER|IFM_10_T); if ((current & BNC_bit) && (media & EE_W5_PORT_BNC)) return(IFM_ETHER|IFM_10_2); if (media & EE_W5_PORT_AUI) return (IFM_ETHER|IFM_10_5); return (IFM_ETHER|IFM_AUTO); } static int ex_ifmedia_upd(ifp) struct ifnet * ifp; { struct ex_softc * sc = ifp->if_softc; if (IFM_TYPE(sc->ifmedia.ifm_media) != IFM_ETHER) return EINVAL; return (0); } static void ex_ifmedia_sts(ifp, ifmr) struct ifnet * ifp; struct ifmediareq * ifmr; { struct ex_softc * sc = ifp->if_softc; EX_LOCK(sc); ifmr->ifm_active = ex_get_media(sc); ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; EX_UNLOCK(sc); return; } u_short ex_eeprom_read(struct ex_softc *sc, int location) { int i; u_short data = 0; int read_cmd = location | EE_READ_CMD; short ctrl_val = EECS; CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, EEPROM_REG, EECS); for (i = 8; i >= 0; i--) { short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI : ctrl_val; CSR_WRITE_1(sc, EEPROM_REG, outval); CSR_WRITE_1(sc, EEPROM_REG, outval | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, outval); DELAY(2); } CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); for (i = 16; i > 0; i--) { CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); data = (data << 1) | ((CSR_READ_1(sc, EEPROM_REG) & EEDO) ? 
1 : 0); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); } ctrl_val &= ~EECS; CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); return(data); } Index: head/sys/dev/ixgb/if_ixgb.c =================================================================== --- head/sys/dev/ixgb/if_ixgb.c (revision 331828) +++ head/sys/dev/ixgb/if_ixgb.c (revision 331829) @@ -1,2538 +1,2533 @@ /******************************************************************************* SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2004, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int ixgb_display_debug_stats = 0; /********************************************************************* * Linked list of board private structures for all NICs found *********************************************************************/ struct adapter *ixgb_adapter_list = NULL; /********************************************************************* * Driver version *********************************************************************/ char ixgb_driver_version[] = "1.0.6"; char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixgb_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixgb_vendor_info_t ixgb_vendor_info_array[] = { /* Intel(R) PRO/10000 Network Connection */ {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *ixgb_strings[] = { "Intel(R) PRO/10GbE Network Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixgb_probe(device_t); static int ixgb_attach(device_t); static int ixgb_detach(device_t); static int ixgb_shutdown(device_t); static void ixgb_intr(void *); static void ixgb_start(struct ifnet *); static void ixgb_start_locked(struct ifnet *); static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); static uint64_t ixgb_get_counter(struct ifnet *, ift_counter); static void ixgb_watchdog(struct adapter *); static void ixgb_init(void *); static void ixgb_init_locked(struct adapter *); static void ixgb_stop(void *); static void ixgb_media_status(struct ifnet *, struct ifmediareq *); static int ixgb_media_change(struct ifnet *); static void ixgb_identify_hardware(struct adapter *); static int ixgb_allocate_pci_resources(struct adapter *); static void ixgb_free_pci_resources(struct adapter *); static void ixgb_local_timer(void *); static int ixgb_hardware_init(struct adapter *); static int ixgb_setup_interface(device_t, struct adapter *); static int ixgb_setup_transmit_structures(struct adapter *); static void ixgb_initialize_transmit_unit(struct adapter *); static int ixgb_setup_receive_structures(struct adapter *); static void ixgb_initialize_receive_unit(struct adapter *); static void ixgb_enable_intr(struct adapter *); static void ixgb_disable_intr(struct adapter *); static void ixgb_free_transmit_structures(struct adapter *); static void ixgb_free_receive_structures(struct adapter *); static void ixgb_update_stats_counters(struct adapter *); static void ixgb_clean_transmit_interrupts(struct adapter *); static int 
ixgb_allocate_receive_structures(struct adapter *); static int ixgb_allocate_transmit_structures(struct adapter *); static int ixgb_process_receive_interrupts(struct adapter *, int); static void ixgb_receive_checksum(struct adapter *, struct ixgb_rx_desc * rx_desc, struct mbuf *); static void ixgb_transmit_checksum_setup(struct adapter *, struct mbuf *, u_int8_t *); static void ixgb_set_promisc(struct adapter *); static void ixgb_disable_promisc(struct adapter *); static void ixgb_set_multi(struct adapter *); static void ixgb_print_hw_stats(struct adapter *); static void ixgb_print_link_status(struct adapter *); static int ixgb_get_buf(int i, struct adapter *, struct mbuf *); static void ixgb_enable_vlans(struct adapter * adapter); static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ixgb_dma_malloc(struct adapter *, bus_size_t, struct ixgb_dma_alloc *, int); static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); #ifdef DEVICE_POLLING static poll_handler_t ixgb_poll; #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixgb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixgb_probe), DEVMETHOD(device_attach, ixgb_attach), DEVMETHOD(device_detach, ixgb_detach), DEVMETHOD(device_shutdown, ixgb_shutdown), DEVMETHOD_END }; static driver_t ixgb_driver = { "ixgb", ixgb_methods, sizeof(struct adapter), }; static devclass_t ixgb_devclass; DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); MODULE_DEPEND(ixgb, pci, 1, 1, 1); MODULE_DEPEND(ixgb, ether, 1, 1, 1); /* some defines for controlling descriptor fetches in h/w */ #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */ #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is * pushed this many descriptors from * head */ #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ /********************************************************************* * Device identification routine * * ixgb_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_probe(device_t dev) { ixgb_vendor_info_t *ent; u_int16_t pci_vendor_id = 0; u_int16_t pci_device_id = 0; u_int16_t pci_subvendor_id = 0; u_int16_t pci_subdevice_id = 0; char adapter_name[60]; INIT_DEBUGOUT("ixgb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != IXGB_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixgb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s, Version - %s", ixgb_strings[ent->index], ixgb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. 
* This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_attach(device_t dev) { struct adapter *adapter; int tsize, rsize; int error = 0; device_printf(dev, "%s\n", ixgb_copyright); INIT_DEBUGOUT("ixgb_attach: begin"); /* Allocate, clear, and link in our adapter structure */ if (!(adapter = device_get_softc(dev))) { device_printf(dev, "adapter structure allocation failed\n"); return (ENOMEM); } bzero(adapter, sizeof(struct adapter)); adapter->dev = dev; adapter->osdep.dev = dev; IXGB_LOCK_INIT(adapter, device_get_nameunit(dev)); if (ixgb_adapter_list != NULL) ixgb_adapter_list->prev = adapter; adapter->next = ixgb_adapter_list; ixgb_adapter_list = adapter; /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, (void *)adapter, 0, ixgb_sysctl_stats, "I", "Statistics"); callout_init_mtx(&adapter->timer, &adapter->mtx, 0); /* Determine hardware revision */ ixgb_identify_hardware(adapter); /* Parameters (to be read from user) */ adapter->num_tx_desc = IXGB_MAX_TXD; adapter->num_rx_desc = IXGB_MAX_RXD; adapter->tx_int_delay = TIDV; adapter->rx_int_delay = RDTR; adapter->rx_buffer_len = IXGB_RXBUFFER_2048; adapter->hw.fc.high_water = FCRTH; adapter->hw.fc.low_water = FCRTL; adapter->hw.fc.pause_time = FCPAUSE; adapter->hw.fc.send_xon = TRUE; adapter->hw.fc.type = FLOW_CONTROL; /* Set the max frame size assuming standard ethernet sized frames */ adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; if (ixgb_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } tsize = IXGB_ROUNDUP(adapter->num_tx_desc * sizeof(struct ixgb_tx_desc), 4096); /* Allocate Transmit Descriptor ring */ if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TxDescriptor memory\n"); error = ENOMEM; goto err_tx_desc; } adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr; rsize = IXGB_ROUNDUP(adapter->num_rx_desc * sizeof(struct ixgb_rx_desc), 4096); /* Allocate Receive Descriptor ring */ if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate rx_desc memory\n"); error = ENOMEM; goto err_rx_desc; } adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr; /* Allocate multicast array memory. 
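 * One IXGB_ETH_LENGTH_OF_ADDRESS-byte slot per entry, up to
 * MAX_NUM_MULTICAST_ADDRESSES entries; the array is filled in by
 * ixgb_set_multi().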
*/ adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_hw_init; } /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { device_printf(dev, "Unable to initialize the hardware\n"); error = EIO; goto err_hw_init; } /* Setup OS specific network interface */ if (ixgb_setup_interface(dev, adapter) != 0) goto err_hw_init; /* Initialize statistics */ ixgb_clear_hw_cntrs(&adapter->hw); ixgb_update_stats_counters(adapter); INIT_DEBUGOUT("ixgb_attach: end"); return (0); err_hw_init: ixgb_dma_free(adapter, &adapter->rxdma); err_rx_desc: ixgb_dma_free(adapter, &adapter->txdma); err_tx_desc: err_pci: if (adapter->ifp != NULL) if_free(adapter->ifp); ixgb_free_pci_resources(adapter); sysctl_ctx_free(&adapter->sysctl_ctx); free(adapter->mta, M_DEVBUF); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("ixgb_detach: begin"); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IXGB_LOCK(adapter); adapter->in_detach = 1; ixgb_stop(adapter); IXGB_UNLOCK(adapter); #if __FreeBSD_version < 500000 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); #else ether_ifdetach(ifp); #endif callout_drain(&adapter->timer); ixgb_free_pci_resources(adapter); #if __FreeBSD_version >= 500000 if_free(ifp); #endif /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { ixgb_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { ixgb_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } /* Remove from the adapter list */ if (ixgb_adapter_list == adapter) ixgb_adapter_list = adapter->next; if (adapter->next != NULL) adapter->next->prev = adapter->prev; if (adapter->prev != NULL) adapter->prev->next = adapter->next; free(adapter->mta, M_DEVBUF); IXGB_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixgb_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); IXGB_LOCK(adapter); ixgb_stop(adapter); IXGB_UNLOCK(adapter); return (0); } /********************************************************************* * Transmit entry point * * ixgb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
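 * (On an ixgb_encap() failure the mbuf is put back on the send queue
 * with IF_PREPEND() and IFF_DRV_OACTIVE is set; see the loop below.)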
**********************************************************************/ static void ixgb_start_locked(struct ifnet * ifp) { struct mbuf *m_head; struct adapter *adapter = ifp->if_softc; IXGB_LOCK_ASSERT(adapter); if (!adapter->link_active) return; while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (ixgb_encap(adapter, m_head)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ #if __FreeBSD_version < 500000 if (ifp->if_bpf) bpf_mtap(ifp, m_head); #else ETHER_BPF_MTAP(ifp, m_head); #endif /* Set timeout in case hardware has problems transmitting */ adapter->tx_timer = IXGB_TX_TIMEOUT; } return; } static void ixgb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; IXGB_LOCK(adapter); ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * Ioctl entry point * * ixgb_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data) { int mask, error = 0; struct ifreq *ifr = (struct ifreq *) data; struct adapter *adapter = ifp->if_softc; if (adapter->in_detach) goto out; switch (command) { - case SIOCSIFADDR: - case SIOCGIFADDR: - IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); - ether_ioctl(ifp, command, data); - break; case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) { error = EINVAL; } else { IXGB_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->hw.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); IXGB_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ixgb_init_locked(adapter); } ixgb_disable_promisc(adapter); ixgb_set_promisc(adapter); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_stop(adapter); } } IXGB_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ixgb_set_multi(adapter); ixgb_enable_intr(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(ixgb_poll, ifp); if (error) return(error); IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IXGB_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IXGB_LOCK(adapter); ixgb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IXGB_UNLOCK(adapter); } } #endif /* DEVICE_POLLING */ if (mask & IFCAP_HWCSUM) { if (IFCAP_HWCSUM & ifp->if_capenable) ifp->if_capenable &= ~IFCAP_HWCSUM; else ifp->if_capenable |= IFCAP_HWCSUM; if 
(ifp->if_drv_flags & IFF_DRV_RUNNING) ixgb_init(adapter); } break; default: - IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command); - error = EINVAL; + error = ether_ioctl(ifp, command, data); + break; } out: return (error); } /********************************************************************* * Watchdog entry point * * This routine is called whenever hardware quits transmitting. * **********************************************************************/ static void ixgb_watchdog(struct adapter *adapter) { struct ifnet *ifp; ifp = adapter->ifp; /* * If we are in this routine because of pause frames, then don't * reset the hardware. */ if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) { adapter->tx_timer = IXGB_TX_TIMEOUT; return; } if_printf(ifp, "watchdog timeout -- resetting\n"); ixgb_stop(adapter); ixgb_init_locked(adapter); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); return; } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void ixgb_init_locked(struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_init: begin"); IXGB_LOCK_ASSERT(adapter); ixgb_stop(adapter); ifp = adapter->ifp; /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr, IXGB_ETH_LENGTH_OF_ADDRESS); /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { if_printf(ifp, "Unable to initialize the hardware\n"); return; } ixgb_enable_vlans(adapter); /* Prepare transmit descriptors and buffers */ if (ixgb_setup_transmit_structures(adapter)) { if_printf(ifp, "Could not setup transmit structures\n"); ixgb_stop(adapter); return; } ixgb_initialize_transmit_unit(adapter); /* Setup Multicast table */ ixgb_set_multi(adapter); /* Prepare receive descriptors and buffers */ if (ixgb_setup_receive_structures(adapter)) { if_printf(ifp, "Could not setup receive structures\n"); ixgb_stop(adapter); return; } ixgb_initialize_receive_unit(adapter); /* Don't lose promiscuous settings */ ixgb_set_promisc(adapter); ifp = adapter->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = IXGB_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; /* Enable jumbo frames */ if (ifp->if_mtu > ETHERMTU) { uint32_t temp_reg; IXGB_WRITE_REG(&adapter->hw, MFS, adapter->hw.max_frame_size << IXGB_MFS_SHIFT); temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0); temp_reg |= IXGB_CTRL0_JFE; IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg); } callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter); ixgb_clear_hw_cntrs(&adapter->hw); #ifdef DEVICE_POLLING /* * Only disable interrupts if we are polling, make sure they are on * otherwise. 
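 * With IFCAP_POLLING enabled, ixgb_poll() performs the receive and
 * transmit processing instead of the interrupt handler.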
*/ if (ifp->if_capenable & IFCAP_POLLING) ixgb_disable_intr(adapter); else #endif ixgb_enable_intr(adapter); return; } static void ixgb_init(void *arg) { struct adapter *adapter = arg; IXGB_LOCK(adapter); ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); return; } #ifdef DEVICE_POLLING static int ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; u_int32_t reg_icr; int rx_npkts; IXGB_LOCK_ASSERT(adapter); if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); } } rx_npkts = ixgb_process_receive_interrupts(adapter, count); ixgb_clean_transmit_interrupts(adapter); if (ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); return (rx_npkts); } static int ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; int rx_npkts = 0; IXGB_LOCK(adapter); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = ixgb_poll_locked(ifp, cmd, count); IXGB_UNLOCK(adapter); return (rx_npkts); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Interrupt Service routine * **********************************************************************/ static void ixgb_intr(void *arg) { u_int32_t loop_cnt = IXGB_MAX_INTR; u_int32_t reg_icr; struct ifnet *ifp; struct adapter *adapter = arg; boolean_t rxdmt0 = FALSE; IXGB_LOCK(adapter); ifp = adapter->ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { IXGB_UNLOCK(adapter); return; } #endif reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr == 0) { IXGB_UNLOCK(adapter); return; } if (reg_icr & IXGB_INT_RXDMT0) rxdmt0 = TRUE; #ifdef _SV_ if (reg_icr & IXGB_INT_RXDMT0) adapter->sv_stats.icr_rxdmt0++; if (reg_icr & IXGB_INT_RXO) adapter->sv_stats.icr_rxo++; if (reg_icr & IXGB_INT_RXT0) adapter->sv_stats.icr_rxt0++; if (reg_icr & IXGB_INT_TXDW) adapter->sv_stats.icr_TXDW++; #endif /* _SV_ */ /* Link status change */ if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); } while (loop_cnt > 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_process_receive_interrupts(adapter, -1); ixgb_clean_transmit_interrupts(adapter); } loop_cnt--; } if (rxdmt0 && adapter->raidc) { IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0); IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0); } if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("ixgb_media_status: begin"); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->hw.link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
* **********************************************************************/ static int ixgb_media_change(struct ifnet * ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("ixgb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head) { u_int8_t txd_popts; int i, j, error, nsegs; #if __FreeBSD_version < 500000 struct ifvlan *ifv = NULL; #endif bus_dma_segment_t segs[IXGB_MAX_SCATTER]; bus_dmamap_t map; struct ixgb_buffer *tx_buffer = NULL; struct ixgb_tx_desc *current_tx_desc = NULL; struct ifnet *ifp = adapter->ifp; /* * Force a cleanup if number of TX descriptors available hits the * threshold */ if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { ixgb_clean_transmit_interrupts(adapter); } if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { adapter->no_tx_desc_avail1++; return (ENOBUFS); } /* * Map the packet for DMA. */ if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { adapter->no_tx_map_avail++; return (ENOMEM); } error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { adapter->no_tx_dma_setup++; if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; " "error %u\n", error); bus_dmamap_destroy(adapter->txtag, map); return (error); } KASSERT(nsegs != 0, ("ixgb_encap: empty packet")); if (nsegs > adapter->num_tx_desc_avail) { adapter->no_tx_desc_avail2++; bus_dmamap_destroy(adapter->txtag, map); return (ENOBUFS); } if (ifp->if_hwassist > 0) { ixgb_transmit_checksum_setup(adapter, m_head, &txd_popts); } else txd_popts = 0; /* Find out if we are in vlan mode */ #if __FreeBSD_version < 500000 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #elseif __FreeBSD_version < 700000 mtag = VLAN_OUTPUT_TAG(ifp, m_head); #endif i = adapter->next_avail_tx_desc; for (j = 0; j < nsegs; j++) { tx_buffer = &adapter->tx_buffer_area[i]; current_tx_desc = &adapter->tx_desc_base[i]; current_tx_desc->buff_addr = htole64(segs[j].ds_addr); current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); current_tx_desc->popts = txd_popts; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; } adapter->num_tx_desc_avail -= nsegs; adapter->next_avail_tx_desc = i; #if __FreeBSD_version < 500000 if (ifv != NULL) { /* Set the vlan id */ current_tx_desc->vlan = ifv->ifv_tag; #elseif __FreeBSD_version < 700000 if (mtag != NULL) { /* Set the vlan id */ current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); #else if (m_head->m_flags & M_VLANTAG) { current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag; #endif /* Tell hardware to add tag */ current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; } tx_buffer->m_head = m_head; tx_buffer->map = map; bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet needs End Of Packet (EOP) */ current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000 * that this frame is available to transmit. 
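 * (TDT here is the 82597EX transmit descriptor tail register.)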
*/ IXGB_WRITE_REG(&adapter->hw, TDT, i); return (0); } static void ixgb_set_promisc(struct adapter * adapter) { u_int32_t reg_rctl; struct ifnet *ifp = adapter->ifp; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= IXGB_RCTL_MPE; reg_rctl &= ~IXGB_RCTL_UPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } return; } static void ixgb_disable_promisc(struct adapter * adapter) { u_int32_t reg_rctl; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= (~IXGB_RCTL_UPE); reg_rctl &= (~IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void ixgb_set_multi(struct adapter * adapter) { u_int32_t reg_rctl = 0; u_int8_t *mta; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("ixgb_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS * MAX_NUM_MULTICAST_ADDRESSES); if_maddr_rlock(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); mcnt++; } if_maddr_runlock(ifp); if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl |= IXGB_RCTL_MPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); return; } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. * **********************************************************************/ static void ixgb_local_timer(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK_ASSERT(adapter); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ixgb_update_stats_counters(adapter); if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_print_hw_stats(adapter); } if (adapter->tx_timer != 0 && --adapter->tx_timer == 0) ixgb_watchdog(adapter); callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter); } static void ixgb_print_link_status(struct adapter * adapter) { if (adapter->hw.link_up) { if (!adapter->link_active) { if_printf(adapter->ifp, "Link is up %d Mbps %s \n", 10000, "Full Duplex"); adapter->link_active = 1; } } else { if (adapter->link_active) { if_printf(adapter->ifp, "Link is Down \n"); adapter->link_active = 0; } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. 
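 * Interrupts are masked and the watchdog callout is stopped before the
 * transmit and receive structures are freed.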
* **********************************************************************/ static void ixgb_stop(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK_ASSERT(adapter); INIT_DEBUGOUT("ixgb_stop: begin\n"); ixgb_disable_intr(adapter); adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); callout_stop(&adapter->timer); ixgb_free_transmit_structures(adapter); ixgb_free_receive_structures(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); adapter->tx_timer = 0; return; } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void ixgb_identify_hardware(struct adapter * adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MacType, etc. based on this PCI info */ switch (adapter->hw.device_id) { case IXGB_DEVICE_ID_82597EX: case IXGB_DEVICE_ID_82597EX_SR: adapter->hw.mac_type = ixgb_82597; break; default: INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id); device_printf(dev, "unsupported device id 0x%x\n", adapter->hw.device_id); } return; } static int ixgb_allocate_pci_resources(struct adapter * adapter) { int rid; device_t dev = adapter->dev; rid = IXGB_MMBA; adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(adapter->res_memory)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->res_memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory); adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; rid = 0x0; adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!(adapter->res_interrupt)) { device_printf(dev, "Unable to allocate bus resource: interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, NULL, (void (*) (void *))ixgb_intr, adapter, &adapter->int_handler_tag)) { device_printf(dev, "Error registering interrupt handler!\n"); return (ENXIO); } adapter->hw.back = &adapter->osdep; return (0); } static void ixgb_free_pci_resources(struct adapter * adapter) { device_t dev = adapter->dev; if (adapter->res_interrupt != NULL) { bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag); bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->res_interrupt); } if (adapter->res_memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA, adapter->res_memory); } if (adapter->res_ioport != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, adapter->res_ioport); } return; } /********************************************************************* * * Initialize the hardware to a configuration as specified by the * adapter structure. 
The controller is reset, the EEPROM is * verified, the MAC address is set, then the shared initialization * routines are called. * **********************************************************************/ static int ixgb_hardware_init(struct adapter * adapter) { /* Issue a global reset */ adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n"); return (EIO); } if (!ixgb_init_hw(&adapter->hw)) { device_printf(adapter->dev, "Hardware Initialization Failed"); return (EIO); } return (0); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int ixgb_setup_interface(device_t dev, struct adapter * adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } #if __FreeBSD_version >= 502000 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); #else ifp->if_unit = device_get_unit(dev); ifp->if_name = "ixgb"; #endif ifp->if_baudrate = 1000000000; ifp->if_init = ixgb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixgb_ioctl; ifp->if_start = ixgb_start; ifp->if_get_counter = ixgb_get_counter; ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1; #if __FreeBSD_version < 500000 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #else ether_ifattach(ifp, adapter->hw.curr_mac_addr); #endif ifp->if_capabilities = IFCAP_HWCSUM; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); #if __FreeBSD_version >= 500000 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; #endif ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change, ixgb_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /******************************************************************** * Manage DMA'able memory. 
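 * ixgb_dmamap_cb() is the bus_dmamap_load() callback; it only records the
 * physical address of the single segment. ixgb_dma_malloc() wraps tag
 * creation, allocation and load of one contiguous region (unwinding on
 * failure) and ixgb_dma_free() reverses it. A typical use, sketched here only
 * for illustration, is allocating a descriptor ring from the attach path,
 * e.g. ixgb_dma_malloc(adapter, size, &adapter->txdma, BUS_DMA_NOWAIT).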
*******************************************************************/ static void ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs->ds_addr; return; } static int ixgb_dma_malloc(struct adapter * adapter, bus_size_t size, struct ixgb_dma_alloc * dma, int mapflags) { device_t dev; int r; dev = adapter->dev; r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &dma->dma_tag); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; " "error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; " "error %u\n", r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ixgb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; " "error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. * **********************************************************************/ static int ixgb_allocate_transmit_structures(struct adapter * adapter) { if (!(adapter->tx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n"); return ENOMEM; } bzero(adapter->tx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_tx_desc); return 0; } /********************************************************************* * * Allocate and initialize transmit structures. * **********************************************************************/ static int ixgb_setup_transmit_structures(struct adapter * adapter) { /* * Setup DMA descriptor areas. 
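	 * The transmit tag below allows up to IXGB_MAX_SCATTER segments of at
	 * most MCLBYTES each (maxsize MCLBYTES * IXGB_MAX_SCATTER), so a packet
	 * can be DMA-loaded directly from a chain of mbuf clusters.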
*/ if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ IXGB_MAX_SCATTER, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->txtag)) { device_printf(adapter->dev, "Unable to allocate TX DMA tag\n"); return (ENOMEM); } if (ixgb_allocate_transmit_structures(adapter)) return ENOMEM; bzero((void *)adapter->tx_desc_base, (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); adapter->next_avail_tx_desc = 0; adapter->oldest_used_tx_desc = 0; /* Set number of descriptors available */ adapter->num_tx_desc_avail = adapter->num_tx_desc; /* Set checksum context */ adapter->active_checksum_context = OFFLOAD_NONE; return 0; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void ixgb_initialize_transmit_unit(struct adapter * adapter) { u_int32_t reg_tctl; u_int64_t tdba = adapter->txdma.dma_paddr; /* Setup the Base and Length of the Tx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); IXGB_WRITE_REG(&adapter->hw, TDLEN, adapter->num_tx_desc * sizeof(struct ixgb_tx_desc)); /* Setup the HW Tx Head and Tail descriptor pointers */ IXGB_WRITE_REG(&adapter->hw, TDH, 0); IXGB_WRITE_REG(&adapter->hw, TDT, 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", IXGB_READ_REG(&adapter->hw, TDBAL), IXGB_READ_REG(&adapter->hw, TDLEN)); IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); /* Program the Transmit Control Register */ reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL); reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl); /* Setup Transmit Descriptor Settings for this adapter */ adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS; if (adapter->tx_int_delay > 0) adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE; return; } /********************************************************************* * * Free all transmit related data structures. * **********************************************************************/ static void ixgb_free_transmit_structures(struct adapter * adapter) { struct ixgb_buffer *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_structures: begin"); if (adapter->tx_buffer_area != NULL) { tx_buffer = adapter->tx_buffer_area; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); } tx_buffer->m_head = NULL; } } if (adapter->tx_buffer_area != NULL) { free(adapter->tx_buffer_area, M_DEVBUF); adapter->tx_buffer_area = NULL; } if (adapter->txtag != NULL) { bus_dma_tag_destroy(adapter->txtag); adapter->txtag = NULL; } return; } /********************************************************************* * * The offload context needs to be set when we transfer the first * packet of a particular protocol (TCP/UDP). We change the * context only if the protocol type changes. 
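 * The context descriptor takes a regular slot in the TX ring
 * (num_tx_desc_avail is decremented), so alternating between TCP and UDP
 * checksum offload costs one extra descriptor per switch.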
* **********************************************************************/ static void ixgb_transmit_checksum_setup(struct adapter * adapter, struct mbuf * mp, u_int8_t * txd_popts) { struct ixgb_context_desc *TXD; struct ixgb_buffer *tx_buffer; int curr_txd; if (mp->m_pkthdr.csum_flags) { if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) return; else adapter->active_checksum_context = OFFLOAD_TCP_IP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_UDP_IP) return; else adapter->active_checksum_context = OFFLOAD_UDP_IP; } else { *txd_popts = 0; return; } } else { *txd_popts = 0; return; } /* * If we reach this point, the checksum offload context needs to be * reset. */ curr_txd = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[curr_txd]; TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd]; TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip); TXD->tucse = 0; TXD->mss = 0; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct tcphdr, th_sum); } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct udphdr, uh_sum); } TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE; tx_buffer->m_head = NULL; if (++curr_txd == adapter->num_tx_desc) curr_txd = 0; adapter->num_tx_desc_avail--; adapter->next_avail_tx_desc = curr_txd; return; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void ixgb_clean_transmit_interrupts(struct adapter * adapter) { int i, num_avail; struct ixgb_buffer *tx_buffer; struct ixgb_tx_desc *tx_desc; IXGB_LOCK_ASSERT(adapter); if (adapter->num_tx_desc_avail == adapter->num_tx_desc) return; #ifdef _SV_ adapter->clean_tx_interrupts++; #endif num_avail = adapter->num_tx_desc_avail; i = adapter->oldest_used_tx_desc; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) { tx_desc->status = 0; num_avail++; if (tx_buffer->m_head) { bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } if (++i == adapter->num_tx_desc) i = 0; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; } adapter->oldest_used_tx_desc = i; /* * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that * it is OK to send packets. If there are no pending descriptors, * clear the timeout. Otherwise, if some descriptors have been freed, * restart the timeout. 
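	 * Reclaiming walks forward from oldest_used_tx_desc for as long as the
	 * DD (descriptor done) status bit is set, syncing and unloading the DMA
	 * map and freeing the mbuf of each completed slot.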
*/ if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) { struct ifnet *ifp = adapter->ifp; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (num_avail == adapter->num_tx_desc) adapter->tx_timer = 0; else if (num_avail == adapter->num_tx_desc_avail) adapter->tx_timer = IXGB_TX_TIMEOUT; } adapter->num_tx_desc_avail = num_avail; return; } /********************************************************************* * * Get a buffer from system mbuf buffer pool. * **********************************************************************/ static int ixgb_get_buf(int i, struct adapter * adapter, struct mbuf * nmp) { struct mbuf *mp = nmp; struct ixgb_buffer *rx_buffer; struct ifnet *ifp; bus_addr_t paddr; int error; ifp = adapter->ifp; if (mp == NULL) { mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { adapter->mbuf_alloc_failed++; return (ENOBUFS); } mp->m_len = mp->m_pkthdr.len = MCLBYTES; } else { mp->m_len = mp->m_pkthdr.len = MCLBYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } if (ifp->if_mtu <= ETHERMTU) { m_adj(mp, ETHER_ALIGN); } rx_buffer = &adapter->rx_buffer_area[i]; /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, mtod(mp, void *), mp->m_len, ixgb_dmamap_cb, &paddr, 0); if (error) { m_free(mp); return (error); } rx_buffer->m_head = mp; adapter->rx_desc_base[i].buff_addr = htole64(paddr); bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
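 * Each rx_buffer is backed by one mbuf cluster (MCLBYTES); ixgb_get_buf()
 * maps it with bus_dmamap_load() and stores the little-endian physical
 * address into the matching receive descriptor.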
* **********************************************************************/ static int ixgb_allocate_receive_structures(struct adapter * adapter) { int i, error; struct ixgb_buffer *rx_buffer; if (!(adapter->rx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(adapter->dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } bzero(adapter->rx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_rx_desc); error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->rxtag); if (error != 0) { device_printf(adapter->dev, "ixgb_allocate_receive_structures: " "bus_dma_tag_create failed; error %u\n", error); goto fail_0; } rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &rx_buffer->map); if (error != 0) { device_printf(adapter->dev, "ixgb_allocate_receive_structures: " "bus_dmamap_create failed; error %u\n", error); goto fail_1; } } for (i = 0; i < adapter->num_rx_desc; i++) { if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { adapter->rx_buffer_area[i].m_head = NULL; adapter->rx_desc_base[i].buff_addr = 0; return (ENOBUFS); } } return (0); fail_1: bus_dma_tag_destroy(adapter->rxtag); fail_0: adapter->rxtag = NULL; free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; return (error); } /********************************************************************* * * Allocate and initialize receive structures. * **********************************************************************/ static int ixgb_setup_receive_structures(struct adapter * adapter) { bzero((void *)adapter->rx_desc_base, (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc); if (ixgb_allocate_receive_structures(adapter)) return ENOMEM; /* Setup our descriptor pointers */ adapter->next_rx_desc_to_check = 0; adapter->next_rx_desc_to_use = 0; return (0); } /********************************************************************* * * Enable receive unit. 
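 * Receives are disabled while the ring base/length and head/tail pointers are
 * programmed; RXDCTL thresholds, the RAIDC interrupt-moderation register,
 * optional receive checksum offload and the RCTL buffer-size bits are then
 * set before RXEN is re-enabled.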
* **********************************************************************/ static void ixgb_initialize_receive_unit(struct adapter * adapter) { u_int32_t reg_rctl; u_int32_t reg_rxcsum; u_int32_t reg_rxdctl; struct ifnet *ifp; u_int64_t rdba = adapter->rxdma.dma_paddr; ifp = adapter->ifp; /* * Make sure receives are disabled while setting up the descriptor * ring */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN); /* Set the Receive Delay Timer Register */ IXGB_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); /* Setup the Base and Length of the Rx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * sizeof(struct ixgb_rx_desc)); /* Setup the HW Rx Head and Tail Descriptor Pointers */ IXGB_WRITE_REG(&adapter->hw, RDH, 0); IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT; IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl); adapter->raidc = 1; if (adapter->raidc) { uint32_t raidc; uint8_t poll_threshold; #define IXGB_RAIDC_POLL_DEFAULT 120 poll_threshold = ((adapter->num_rx_desc - 1) >> 3); poll_threshold >>= 1; poll_threshold &= 0x3F; raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE | (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) | (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) | poll_threshold; IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc); } /* Enable Receive Checksum Offload for TCP and UDP ? */ if (ifp->if_capenable & IFCAP_RXCSUM) { reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM); reg_rxcsum |= IXGB_RXCSUM_TUOFL; IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); } /* Setup the Receive Control Register */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC | IXGB_RCTL_CFF | (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); switch (adapter->rx_buffer_len) { default: case IXGB_RXBUFFER_2048: reg_rctl |= IXGB_RCTL_BSIZE_2048; break; case IXGB_RXBUFFER_4096: reg_rctl |= IXGB_RCTL_BSIZE_4096; break; case IXGB_RXBUFFER_8192: reg_rctl |= IXGB_RCTL_BSIZE_8192; break; case IXGB_RXBUFFER_16384: reg_rctl |= IXGB_RCTL_BSIZE_16384; break; } reg_rctl |= IXGB_RCTL_RXEN; /* Enable Receives */ IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * * Free receive related data structures. 
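 * Per-buffer DMA maps are unloaded and destroyed, any attached mbufs are
 * freed, and finally the rx_buffer array and the receive DMA tag are
 * released.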
* **********************************************************************/ static void ixgb_free_receive_structures(struct adapter * adapter) { struct ixgb_buffer *rx_buffer; int i; INIT_DEBUGOUT("free_receive_structures: begin"); if (adapter->rx_buffer_area != NULL) { rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { if (rx_buffer->map != NULL) { bus_dmamap_unload(adapter->rxtag, rx_buffer->map); bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); } if (rx_buffer->m_head != NULL) m_freem(rx_buffer->m_head); rx_buffer->m_head = NULL; } } if (adapter->rx_buffer_area != NULL) { free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; } if (adapter->rxtag != NULL) { bus_dma_tag_destroy(adapter->rxtag); adapter->rxtag = NULL; } return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * *********************************************************************/ static int ixgb_process_receive_interrupts(struct adapter * adapter, int count) { struct ifnet *ifp; struct mbuf *mp; #if __FreeBSD_version < 500000 struct ether_header *eh; #endif int eop = 0; int len; u_int8_t accept_frame = 0; int i; int next_to_use = 0; int eop_desc; int rx_npkts = 0; /* Pointer to the receive descriptor being examined. */ struct ixgb_rx_desc *current_desc; IXGB_LOCK_ASSERT(adapter); ifp = adapter->ifp; i = adapter->next_rx_desc_to_check; next_to_use = adapter->next_rx_desc_to_use; eop_desc = adapter->next_rx_desc_to_check; current_desc = &adapter->rx_desc_base[i]; if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) { #ifdef _SV_ adapter->no_pkts_avail++; #endif return (rx_npkts); } while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) { mp = adapter->rx_buffer_area[i].m_head; bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, BUS_DMASYNC_POSTREAD); accept_frame = 1; if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) { count--; eop = 1; } else { eop = 0; } len = current_desc->length; if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE)) { accept_frame = 0; } if (accept_frame) { /* Assign correct length to the current fragment */ mp->m_len = len; if (adapter->fmp == NULL) { mp->m_pkthdr.len = len; adapter->fmp = mp; /* Store the first mbuf */ adapter->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; adapter->lmp->m_next = mp; adapter->lmp = adapter->lmp->m_next; adapter->fmp->m_pkthdr.len += len; } if (eop) { eop_desc = i; adapter->fmp->m_pkthdr.rcvif = ifp; #if __FreeBSD_version < 500000 eh = mtod(adapter->fmp, struct ether_header *); /* Remove ethernet header from mbuf */ m_adj(adapter->fmp, sizeof(struct ether_header)); ixgb_receive_checksum(adapter, current_desc, adapter->fmp); if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(eh, adapter->fmp, current_desc->special); else ether_input(ifp, eh, adapter->fmp); #else ixgb_receive_checksum(adapter, current_desc, adapter->fmp); #if __FreeBSD_version < 700000 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(ifp, adapter->fmp, current_desc->special); #else if (current_desc->status & IXGB_RX_DESC_STATUS_VP) { adapter->fmp->m_pkthdr.ether_vtag = current_desc->special; adapter->fmp->m_flags |= 
M_VLANTAG; } #endif if (adapter->fmp != NULL) { IXGB_UNLOCK(adapter); (*ifp->if_input) (ifp, adapter->fmp); IXGB_LOCK(adapter); rx_npkts++; } #endif adapter->fmp = NULL; adapter->lmp = NULL; } adapter->rx_buffer_area[i].m_head = NULL; } else { adapter->dropped_pkts++; if (adapter->fmp != NULL) m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; } /* Zero out the receive descriptors status */ current_desc->status = 0; /* Advance our pointers to the next descriptor */ if (++i == adapter->num_rx_desc) { i = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_check = i; if (--i < 0) i = (adapter->num_rx_desc - 1); /* * 82597EX: Workaround for redundent write back in receive descriptor ring (causes * memory corruption). Avoid using and re-submitting the most recently received RX * descriptor back to hardware. * * if(Last written back descriptor == EOP bit set descriptor) * then avoid re-submitting the most recently received RX descriptor * back to hardware. * if(Last written back descriptor != EOP bit set descriptor) * then avoid re-submitting the most recently received RX descriptors * till last EOP bit set descriptor. */ if (eop_desc != i) { if (++eop_desc == adapter->num_rx_desc) eop_desc = 0; i = eop_desc; } /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */ while (next_to_use != i) { current_desc = &adapter->rx_desc_base[next_to_use]; if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { mp = adapter->rx_buffer_area[next_to_use].m_head; ixgb_get_buf(next_to_use, adapter, mp); } else { if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS) break; } /* Advance our pointers to the next descriptor */ if (++next_to_use == adapter->num_rx_desc) { next_to_use = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_use = next_to_use; if (--next_to_use < 0) next_to_use = (adapter->num_rx_desc - 1); /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */ IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use); return (rx_npkts); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void ixgb_receive_checksum(struct adapter * adapter, struct ixgb_rx_desc * rx_desc, struct mbuf * mp) { if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) { /* Did it pass? */ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else { mp->m_pkthdr.csum_flags = 0; } } if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) { /* Did it pass? 
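		 * TCPE clear means the hardware verified the TCP/UDP checksum,
		 * so the stack is handed CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
		 * csum_data set to 0xffff.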
*/ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } return; } static void ixgb_enable_vlans(struct adapter * adapter) { uint32_t ctrl; ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); ctrl |= IXGB_CTRL0_VME; IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); return; } static void ixgb_enable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO)); return; } static void ixgb_disable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMC, ~0); return; } void ixgb_write_pci_cfg(struct ixgb_hw * hw, uint32_t reg, uint16_t * value) { pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg, *value, 2); } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void ixgb_update_stats_counters(struct adapter * adapter) { adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS); adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, 
JPRCL); adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); } static uint64_t ixgb_get_counter(struct ifnet *ifp, ift_counter cnt) { struct adapter *adapter; adapter = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: return (adapter->stats.gprcl); case IFCOUNTER_OPACKETS: return ( adapter->stats.gptcl); case IFCOUNTER_IBYTES: return (adapter->stats.gorcl); case IFCOUNTER_OBYTES: return (adapter->stats.gotcl); case IFCOUNTER_IMCASTS: return ( adapter->stats.mprcl); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.crcerrs + adapter->stats.rnbc + adapter->stats.mpc + adapter->stats.rlec); default: return (if_get_counter_default(ifp, cnt)); } } /********************************************************************** * * This routine is called only when ixgb_display_debug_stats is enabled. * This routine provides a way to take a look at important statistics * maintained by the driver and hardware. * **********************************************************************/ static void ixgb_print_hw_stats(struct adapter * adapter) { char buf_speed[100], buf_type[100]; ixgb_bus_speed bus_speed; ixgb_bus_type bus_type; device_t dev; dev = adapter->dev; #ifdef _SV_ device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail); device_printf(dev, "CleanTxInterrupts = %ld\n", adapter->clean_tx_interrupts); device_printf(dev, "ICR RXDMT0 = %lld\n", (long long)adapter->sv_stats.icr_rxdmt0); device_printf(dev, "ICR RXO = %lld\n", (long long)adapter->sv_stats.icr_rxo); device_printf(dev, "ICR RXT0 = %lld\n", (long long)adapter->sv_stats.icr_rxt0); device_printf(dev, "ICR TXDW = %lld\n", (long long)adapter->sv_stats.icr_TXDW); #endif /* _SV_ */ bus_speed = adapter->hw.bus.speed; bus_type = adapter->hw.bus.type; sprintf(buf_speed, bus_speed == ixgb_bus_speed_33 ? "33MHz" : bus_speed == ixgb_bus_speed_66 ? "66MHz" : bus_speed == ixgb_bus_speed_100 ? "100MHz" : bus_speed == ixgb_bus_speed_133 ? "133MHz" : "UNKNOWN"); device_printf(dev, "PCI_Bus_Speed = %s\n", buf_speed); sprintf(buf_type, bus_type == ixgb_bus_type_pci ? "PCI" : bus_type == ixgb_bus_type_pcix ? 
"PCI-X" : "UNKNOWN"); device_printf(dev, "PCI_Bus_Type = %s\n", buf_type); device_printf(dev, "Tx Descriptors not Avail1 = %ld\n", adapter->no_tx_desc_avail1); device_printf(dev, "Tx Descriptors not Avail2 = %ld\n", adapter->no_tx_desc_avail2); device_printf(dev, "Std Mbuf Failed = %ld\n", adapter->mbuf_alloc_failed); device_printf(dev, "Std Cluster Failed = %ld\n", adapter->mbuf_cluster_failed); device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc); device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc); device_printf(dev, "Receive No Buffers = %lld\n", (long long)adapter->stats.rnbc); device_printf(dev, "Receive length errors = %lld\n", (long long)adapter->stats.rlec); device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs); device_printf(dev, "Driver dropped packets = %ld\n", adapter->dropped_pkts); device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc); device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc); device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc); device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc); device_printf(dev, "Good Packets Rcvd = %lld\n", (long long)adapter->stats.gprcl); device_printf(dev, "Good Packets Xmtd = %lld\n", (long long)adapter->stats.gptcl); device_printf(dev, "Jumbo frames recvd = %lld\n", (long long)adapter->stats.jprcl); device_printf(dev, "Jumbo frames Xmtd = %lld\n", (long long)adapter->stats.jptcl); return; } static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS) { int error; int result; struct adapter *adapter; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *) arg1; ixgb_print_hw_stats(adapter); } return error; } Index: head/sys/dev/nxge/if_nxge.c =================================================================== --- head/sys/dev/nxge/if_nxge.c (revision 331828) +++ head/sys/dev/nxge/if_nxge.c (revision 331829) @@ -1,3537 +1,3531 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2007 Neterion, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include int copyright_print = 0; int hal_driver_init_count = 0; size_t size = sizeof(int); static void inline xge_flush_txds(xge_hal_channel_h); /** * xge_probe * Probes for Xframe devices * * @dev Device handle * * Returns * BUS_PROBE_DEFAULT if device is supported * ENXIO if device is not supported */ int xge_probe(device_t dev) { int devid = pci_get_device(dev); int vendorid = pci_get_vendor(dev); int retValue = ENXIO; if(vendorid == XGE_PCI_VENDOR_ID) { if((devid == XGE_PCI_DEVICE_ID_XENA_2) || (devid == XGE_PCI_DEVICE_ID_HERC_2)) { if(!copyright_print) { xge_os_printf(XGE_COPYRIGHT); copyright_print = 1; } device_set_desc_copy(dev, "Neterion Xframe 10 Gigabit Ethernet Adapter"); retValue = BUS_PROBE_DEFAULT; } } return retValue; } /** * xge_init_params * Sets HAL parameter values (from kenv). * * @dconfig Device Configuration * @dev Device Handle */ void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev) { int qindex, tindex, revision; device_t checkdev; xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); dconfig->mtu = XGE_DEFAULT_INITIAL_MTU; dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED; dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS; dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD; dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN; dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE; XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso, XGE_DEFAULT_ENABLED_TSO); XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro, XGE_DEFAULT_ENABLED_LRO); XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi, XGE_DEFAULT_ENABLED_MSI); XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer, XGE_DEFAULT_LATENCY_TIMER); XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans, XGE_DEFAULT_MAX_SPLITS_TRANS); XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count, XGE_DEFAULT_MMRB_COUNT); XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits, XGE_DEFAULT_SHARED_SPLITS); XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt, XGE_DEFAULT_ISR_POLLING_CNT); XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig), stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC); XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period, XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD); XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period, XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD); XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en, XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN); XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en, XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN); XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time, XGE_DEFAULT_MAC_RMAC_PAUSE_TIME); XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3", mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3); XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7", mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7); XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size, XGE_DEFAULT_FIFO_MEMBLOCK_SIZE); XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold, XGE_DEFAULT_FIFO_RESERVE_THRESHOLD); XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags, XGE_DEFAULT_FIFO_MAX_FRAGS); for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex, XGE_DEFAULT_FIFO_QUEUE_INTR); 
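		/* Each of the XGE_FIFO_COUNT (transmit) FIFO queues pulls its
		 * remaining tunables (queue depth plus per-TTI interrupt-coalescing
		 * settings) from the kernel environment, falling back to the
		 * XGE_DEFAULT_* values. */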
XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex, XGE_DEFAULT_FIFO_QUEUE_MAX); XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial, qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL); for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) { dconfig->fifo.queue[qindex].tti[tindex].enabled = 1; dconfig->fifo.queue[qindex].configured = 1; XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a", urange_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b", urange_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c", urange_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a", ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b", ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c", ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C); XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d", ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D); XGE_GET_PARAM_FIFO_QUEUE_TTI( "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN); XGE_GET_PARAM_FIFO_QUEUE_TTI( "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN); XGE_GET_PARAM_FIFO_QUEUE_TTI( "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US); } } XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size, XGE_DEFAULT_RING_MEMBLOCK_SIZE); XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag, XGE_DEFAULT_RING_STRIP_VLAN_TAG); XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode, XGE_DEFAULT_BUFFER_MODE); if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) || (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) { xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2"); lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1; } for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) { dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU; dconfig->ring.queue[qindex].priority = 0; dconfig->ring.queue[qindex].configured = 1; dconfig->ring.queue[qindex].buffer_mode = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ? 
XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode; XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex, XGE_DEFAULT_RING_QUEUE_MAX); XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial, qindex, XGE_DEFAULT_RING_QUEUE_INITIAL); XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb", dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB); XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts", indicate_max_pkts, qindex, XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS); XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us", backoff_interval_us, qindex, XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d, qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en", timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us", timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a", urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b", urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B); XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c", urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C); } if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) { xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags) xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d", (int)(PAGE_SIZE / 32)) xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32)) dconfig->fifo.max_frags = (PAGE_SIZE / 32); } checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE); if(checkdev != NULL) { /* Check Revision for 0x12 */ revision = pci_read_config(checkdev, xge_offsetof(xge_hal_pci_config_t, revision), 1); if(revision <= 0x12) { /* Set mmrb_count to 1k and max splits = 2 */ dconfig->mmrb_count = 1; dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION; } } } /** * xge_buffer_sizes_set * Set buffer sizes based on Rx buffer mode * * @lldev Per-adapter Data * @buffer_mode Rx Buffer Mode */ void xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu) { int index = 0; int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE; int buffer_size = mtu + frame_header; xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len)); if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) lldev->rxd_mbuf_len[buffer_mode - 1] = mtu; lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? 
buffer_size:frame_header; if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE; if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { index = 2; buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE; while(buffer_size > MJUMPAGESIZE) { lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE; buffer_size -= MJUMPAGESIZE; } XGE_ALIGN_TO(buffer_size, 128); lldev->rxd_mbuf_len[index] = buffer_size; lldev->rxd_mbuf_cnt = index + 1; } for(index = 0; index < buffer_mode; index++) xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index, lldev->rxd_mbuf_len[index]); } /** * xge_buffer_mode_init * Init Rx buffer mode * * @lldev Per-adapter Data * @mtu Interface MTU */ void xge_buffer_mode_init(xge_lldev_t *lldev, int mtu) { int index = 0, buffer_size = 0; xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring); buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE; if(lldev->enabled_lro) (lldev->ifnetp)->if_capenable |= IFCAP_LRO; else (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO; lldev->rxd_mbuf_cnt = lldev->buffer_mode; if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) { XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3); ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B; } else { XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode); ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A; } xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu); xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device), ((lldev->enabled_tso) ? "Enabled":"Disabled")); xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device), ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled"); xge_os_printf("%s: Rx %d Buffer Mode Enabled", device_get_nameunit(lldev->device), lldev->buffer_mode); } /** * xge_driver_initialize * Initializes HAL driver (common for all devices) * * Returns * XGE_HAL_OK if success * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid */ int xge_driver_initialize(void) { xge_hal_uld_cbs_t uld_callbacks; xge_hal_driver_config_t driver_config; xge_hal_status_e status = XGE_HAL_OK; /* Initialize HAL driver */ if(!hal_driver_init_count) { xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t)); xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t)); /* * Initial and maximum size of the queue used to store the events * like Link up/down (xge_hal_event_e) */ driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL; driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX; uld_callbacks.link_up = xge_callback_link_up; uld_callbacks.link_down = xge_callback_link_down; uld_callbacks.crit_err = xge_callback_crit_err; uld_callbacks.event = xge_callback_event; status = xge_hal_driver_initialize(&driver_config, &uld_callbacks); if(status != XGE_HAL_OK) { XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed", xdi_out, status); } } hal_driver_init_count = hal_driver_init_count + 1; xge_hal_driver_debug_module_mask_set(0xffffffff); xge_hal_driver_debug_level_set(XGE_TRACE); xdi_out: return status; } /** * xge_media_init * Initializes, adds and sets media * * @devc Device Handle */ void xge_media_init(device_t devc) { xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc); /* Initialize Media */ ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change, xge_ifmedia_status); /* Add supported media */ ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&lldev->media, IFM_ETHER | 
IFM_AUTO, 0, NULL); ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL); ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL); /* Set media */ ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO); } /** * xge_pci_space_save * Save PCI configuration space * * @dev Device Handle */ void xge_pci_space_save(device_t dev) { struct pci_devinfo *dinfo = NULL; dinfo = device_get_ivars(dev); xge_trace(XGE_TRACE, "Saving PCI configuration space"); pci_cfg_save(dev, dinfo, 0); } /** * xge_pci_space_restore * Restore saved PCI configuration space * * @dev Device Handle */ void xge_pci_space_restore(device_t dev) { struct pci_devinfo *dinfo = NULL; dinfo = device_get_ivars(dev); xge_trace(XGE_TRACE, "Restoring PCI configuration space"); pci_cfg_restore(dev, dinfo); } /** * xge_msi_info_save * Save MSI info * * @lldev Per-adapter Data */ void xge_msi_info_save(xge_lldev_t * lldev) { xge_os_pci_read16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_control), &lldev->msi_info.msi_control); xge_os_pci_read32(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address), &lldev->msi_info.msi_lower_address); xge_os_pci_read32(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address), &lldev->msi_info.msi_higher_address); xge_os_pci_read16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_data), &lldev->msi_info.msi_data); } /** * xge_msi_info_restore * Restore saved MSI info * * @dev Device Handle */ void xge_msi_info_restore(xge_lldev_t *lldev) { /* * If interface is made down and up, traffic fails. It was observed that * MSI information were getting reset on down. Restoring them. */ xge_os_pci_write16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_control), lldev->msi_info.msi_control); xge_os_pci_write32(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address), lldev->msi_info.msi_lower_address); xge_os_pci_write32(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address), lldev->msi_info.msi_higher_address); xge_os_pci_write16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_data), lldev->msi_info.msi_data); } /** * xge_init_mutex * Initializes mutexes used in driver * * @lldev Per-adapter Data */ void xge_mutex_init(xge_lldev_t *lldev) { int qindex; sprintf(lldev->mtx_name_drv, "%s_drv", device_get_nameunit(lldev->device)); mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK, MTX_DEF); for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d", device_get_nameunit(lldev->device), qindex); mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL, MTX_DEF); } } /** * xge_mutex_destroy * Destroys mutexes used in driver * * @lldev Per-adapter Data */ void xge_mutex_destroy(xge_lldev_t *lldev) { int qindex; for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) mtx_destroy(&lldev->mtx_tx[qindex]); mtx_destroy(&lldev->mtx_drv); } /** * xge_print_info * Print device and driver information * * @lldev Per-adapter Data */ void xge_print_info(xge_lldev_t *lldev) { device_t dev = lldev->device; xge_hal_device_t *hldev = lldev->devh; xge_hal_status_e status = XGE_HAL_OK; u64 val64 = 0; const char *xge_pci_bus_speeds[17] = { "PCI 33MHz Bus", "PCI 66MHz Bus", "PCIX(M1) 66MHz Bus", "PCIX(M1) 100MHz Bus", "PCIX(M1) 133MHz Bus", "PCIX(M2) 133MHz Bus", "PCIX(M2) 200MHz Bus", "PCIX(M2) 266MHz Bus", "PCIX(M1) Reserved", "PCIX(M1) 66MHz Bus (Not Supported)", "PCIX(M1) 100MHz Bus (Not Supported)", "PCIX(M1) 133MHz Bus (Not Supported)", 
"PCIX(M2) Reserved", "PCIX 533 Reserved", "PCI Basic Mode", "PCIX Basic Mode", "PCI Invalid Mode" }; xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s", device_get_nameunit(dev), ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"), hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION); xge_os_printf("%s: Serial Number %s", device_get_nameunit(dev), hldev->vpd_data.serial_num); if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) { status = xge_hal_mgmt_reg_read(hldev, 0, xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64); if(status != XGE_HAL_OK) xge_trace(XGE_ERR, "Error for getting bus speed"); xge_os_printf("%s: Adapter is on %s bit %s", device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"), (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)])); } xge_os_printf("%s: Using %s Interrupts", device_get_nameunit(dev), (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line"); } /** * xge_create_dma_tags * Creates DMA tags for both Tx and Rx * * @dev Device Handle * * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors) */ xge_hal_status_e xge_create_dma_tags(device_t dev) { xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); xge_hal_status_e status = XGE_HAL_FAIL; int mtu = (lldev->ifnetp)->if_mtu, maxsize; /* DMA tag for Tx */ status = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent */ PAGE_SIZE, /* Alignment */ 0, /* Bounds */ BUS_SPACE_MAXADDR, /* Low Address */ BUS_SPACE_MAXADDR, /* High Address */ NULL, /* Filter Function */ NULL, /* Filter Function Arguments */ MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */ XGE_MAX_SEGS, /* Number of Segments */ MCLBYTES, /* Maximum Segment Size */ BUS_DMA_ALLOCNOW, /* Flags */ NULL, /* Lock Function */ NULL, /* Lock Function Arguments */ (&lldev->dma_tag_tx)); /* DMA Tag */ if(status != 0) goto _exit; maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE; if(maxsize <= MCLBYTES) { maxsize = MCLBYTES; } else { if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) maxsize = MJUMPAGESIZE; else maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES; } /* DMA tag for Rx */ status = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent */ PAGE_SIZE, /* Alignment */ 0, /* Bounds */ BUS_SPACE_MAXADDR, /* Low Address */ BUS_SPACE_MAXADDR, /* High Address */ NULL, /* Filter Function */ NULL, /* Filter Function Arguments */ maxsize, /* Maximum Size */ 1, /* Number of Segments */ maxsize, /* Maximum Segment Size */ BUS_DMA_ALLOCNOW, /* Flags */ NULL, /* Lock Function */ NULL, /* Lock Function Arguments */ (&lldev->dma_tag_rx)); /* DMA Tag */ if(status != 0) goto _exit1; status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT, &lldev->extra_dma_map); if(status != 0) goto _exit2; status = XGE_HAL_OK; goto _exit; _exit2: status = bus_dma_tag_destroy(lldev->dma_tag_rx); if(status != 0) xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); _exit1: status = bus_dma_tag_destroy(lldev->dma_tag_tx); if(status != 0) xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); status = XGE_HAL_FAIL; _exit: return status; } /** * xge_confirm_changes * Disables and Enables interface to apply requested change * * @lldev Per-adapter Data * @mtu_set Is it called for changing MTU? 
(Yes: 1, No: 0) * * Returns 0 or Error Number */ void xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option) { if(lldev->initialized == 0) goto _exit1; mtx_lock(&lldev->mtx_drv); if_down(lldev->ifnetp); xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); if(option == XGE_SET_MTU) (lldev->ifnetp)->if_mtu = lldev->mtu; else xge_buffer_mode_init(lldev, lldev->mtu); xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); if_up(lldev->ifnetp); mtx_unlock(&lldev->mtx_drv); goto _exit; _exit1: /* Request was to change MTU and device not initialized */ if(option == XGE_SET_MTU) { (lldev->ifnetp)->if_mtu = lldev->mtu; xge_buffer_mode_init(lldev, lldev->mtu); } _exit: return; } /** * xge_change_lro_status * Enable/Disable LRO feature * * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments * * Returns 0 or error number. */ static int xge_change_lro_status(SYSCTL_HANDLER_ARGS) { xge_lldev_t *lldev = (xge_lldev_t *)arg1; int request = lldev->enabled_lro, status = XGE_HAL_OK; status = sysctl_handle_int(oidp, &request, arg2, req); if((status != XGE_HAL_OK) || (!req->newptr)) goto _exit; if((request < 0) || (request > 1)) { status = EINVAL; goto _exit; } /* Return if current and requested states are same */ if(request == lldev->enabled_lro){ xge_trace(XGE_ERR, "LRO is already %s", ((request) ? "enabled" : "disabled")); goto _exit; } lldev->enabled_lro = request; xge_confirm_changes(lldev, XGE_CHANGE_LRO); arg2 = lldev->enabled_lro; _exit: return status; } /** * xge_add_sysctl_handlers * Registers sysctl parameter value update handlers * * @lldev Per-adapter data */ void xge_add_sysctl_handlers(xge_lldev_t *lldev) { struct sysctl_ctx_list *context_list = device_get_sysctl_ctx(lldev->device); struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device); SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO, "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0, xge_change_lro_status, "I", "Enable or disable LRO feature"); } /** * xge_attach * Connects driver to the system if probe was success * * @dev Device Handle */ int xge_attach(device_t dev) { xge_hal_device_config_t *device_config; xge_hal_device_attr_t attr; xge_lldev_t *lldev; xge_hal_device_t *hldev; xge_pci_info_t *pci_info; struct ifnet *ifnetp; int rid, rid0, rid1, error; int msi_count = 0, status = XGE_HAL_OK; int enable_msi = XGE_HAL_INTR_MODE_IRQLINE; device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t)); if(!device_config) { XGE_EXIT_ON_ERR("Memory allocation for device configuration failed", attach_out_config, ENOMEM); } lldev = (xge_lldev_t *) device_get_softc(dev); if(!lldev) { XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM); } lldev->device = dev; xge_mutex_init(lldev); error = xge_driver_initialize(); if(error != XGE_HAL_OK) { xge_resources_free(dev, xge_free_mutex); XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO); } /* HAL device */ hldev = (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t)); if(!hldev) { xge_resources_free(dev, xge_free_terminate_hal_driver); XGE_EXIT_ON_ERR("Memory allocation for HAL device failed", attach_out, ENOMEM); } lldev->devh = hldev; /* Our private structure */ pci_info = (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t)); if(!pci_info) { xge_resources_free(dev, xge_free_hal_device); XGE_EXIT_ON_ERR("Memory allocation for PCI info. 
failed", attach_out, ENOMEM); } lldev->pdev = pci_info; pci_info->device = dev; /* Set bus master */ pci_enable_busmaster(dev); /* Get virtual address for BAR0 */ rid0 = PCIR_BAR(0); pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0, RF_ACTIVE); if(pci_info->regmap0 == NULL) { xge_resources_free(dev, xge_free_pci_info); XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed", attach_out, ENOMEM); } attr.bar0 = (char *)pci_info->regmap0; pci_info->bar0resource = (xge_bus_resource_t*) xge_os_malloc(NULL, sizeof(xge_bus_resource_t)); if(pci_info->bar0resource == NULL) { xge_resources_free(dev, xge_free_bar0); XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed", attach_out, ENOMEM); } ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag = rman_get_bustag(pci_info->regmap0); ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle = rman_get_bushandle(pci_info->regmap0); ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr = pci_info->regmap0; /* Get virtual address for BAR1 */ rid1 = PCIR_BAR(2); pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1, RF_ACTIVE); if(pci_info->regmap1 == NULL) { xge_resources_free(dev, xge_free_bar0_resource); XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed", attach_out, ENOMEM); } attr.bar1 = (char *)pci_info->regmap1; pci_info->bar1resource = (xge_bus_resource_t*) xge_os_malloc(NULL, sizeof(xge_bus_resource_t)); if(pci_info->bar1resource == NULL) { xge_resources_free(dev, xge_free_bar1); XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed", attach_out, ENOMEM); } ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag = rman_get_bustag(pci_info->regmap1); ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle = rman_get_bushandle(pci_info->regmap1); ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr = pci_info->regmap1; /* Save PCI config space */ xge_pci_space_save(dev); attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource; attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource; attr.irqh = lldev->irqhandle; attr.cfgh = pci_info; attr.pdev = pci_info; /* Initialize device configuration parameters */ xge_init_params(device_config, dev); rid = 0; if(lldev->enabled_msi) { /* Number of MSI messages supported by device */ msi_count = pci_msi_count(dev); if(msi_count > 1) { /* Device supports MSI */ if(bootverbose) { xge_trace(XGE_ERR, "MSI count: %d", msi_count); xge_trace(XGE_ERR, "Now, driver supporting 1 message"); } msi_count = 1; error = pci_alloc_msi(dev, &msi_count); if(error == 0) { if(bootverbose) xge_trace(XGE_ERR, "Allocated messages: %d", msi_count); enable_msi = XGE_HAL_INTR_MODE_MSI; rid = 1; } else { if(bootverbose) xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error); } } } lldev->enabled_msi = enable_msi; /* Allocate resource for irq */ lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); if(lldev->irq == NULL) { xge_trace(XGE_ERR, "Allocating irq resource for %s failed", ((rid == 0) ? 
"line interrupt" : "MSI")); if(rid == 1) { error = pci_release_msi(dev); if(error != 0) { xge_trace(XGE_ERR, "Releasing MSI resources failed %d", error); xge_trace(XGE_ERR, "Requires reboot to use MSI again"); } xge_trace(XGE_ERR, "Trying line interrupts"); rid = 0; lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE; lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); } if(lldev->irq == NULL) { xge_trace(XGE_ERR, "Allocating irq resource failed"); xge_resources_free(dev, xge_free_bar1_resource); status = ENOMEM; goto attach_out; } } device_config->intr_mode = lldev->enabled_msi; if(bootverbose) { xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid, lldev->enabled_msi, msi_count); } /* Initialize HAL device */ error = xge_hal_device_initialize(hldev, &attr, device_config); if(error != XGE_HAL_OK) { xge_resources_free(dev, xge_free_irq_resource); XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out, ENXIO); } xge_hal_device_private_set(hldev, lldev); error = xge_interface_setup(dev); if(error != 0) { status = error; goto attach_out; } ifnetp = lldev->ifnetp; ifnetp->if_mtu = device_config->mtu; xge_media_init(dev); /* Associate interrupt handler with the device */ if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { error = bus_setup_intr(dev, lldev->irq, (INTR_TYPE_NET | INTR_MPSAFE), #if __FreeBSD_version > 700030 NULL, #endif xge_isr_msi, lldev, &lldev->irqhandle); xge_msi_info_save(lldev); } else { error = bus_setup_intr(dev, lldev->irq, (INTR_TYPE_NET | INTR_MPSAFE), #if __FreeBSD_version > 700030 xge_isr_filter, #endif xge_isr_line, lldev, &lldev->irqhandle); } if(error != 0) { xge_resources_free(dev, xge_free_media_interface); XGE_EXIT_ON_ERR("Associating interrupt handler with device failed", attach_out, ENXIO); } xge_print_info(lldev); xge_add_sysctl_handlers(lldev); xge_buffer_mode_init(lldev, device_config->mtu); attach_out: xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t)); attach_out_config: return status; } /** * xge_resources_free * Undo what-all we did during load/attach * * @dev Device Handle * @error Identifies what-all to undo */ void xge_resources_free(device_t dev, xge_lables_e error) { xge_lldev_t *lldev; xge_pci_info_t *pci_info; xge_hal_device_t *hldev; int rid, status; /* LL Device */ lldev = (xge_lldev_t *) device_get_softc(dev); pci_info = lldev->pdev; /* HAL Device */ hldev = lldev->devh; switch(error) { case xge_free_all: /* Teardown interrupt handler - device association */ bus_teardown_intr(dev, lldev->irq, lldev->irqhandle); case xge_free_media_interface: /* Media */ ifmedia_removeall(&lldev->media); /* Detach Ether */ ether_ifdetach(lldev->ifnetp); if_free(lldev->ifnetp); xge_hal_device_private_set(hldev, NULL); xge_hal_device_disable(hldev); case xge_free_terminate_hal_device: /* HAL Device */ xge_hal_device_terminate(hldev); case xge_free_irq_resource: /* Release IRQ resource */ bus_release_resource(dev, SYS_RES_IRQ, ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 
0:1), lldev->irq); if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { status = pci_release_msi(dev); if(status != 0) { if(bootverbose) { xge_trace(XGE_ERR, "pci_release_msi returned %d", status); } } } case xge_free_bar1_resource: /* Restore PCI configuration space */ xge_pci_space_restore(dev); /* Free bar1resource */ xge_os_free(NULL, pci_info->bar1resource, sizeof(xge_bus_resource_t)); case xge_free_bar1: /* Release BAR1 */ rid = PCIR_BAR(2); bus_release_resource(dev, SYS_RES_MEMORY, rid, pci_info->regmap1); case xge_free_bar0_resource: /* Free bar0resource */ xge_os_free(NULL, pci_info->bar0resource, sizeof(xge_bus_resource_t)); case xge_free_bar0: /* Release BAR0 */ rid = PCIR_BAR(0); bus_release_resource(dev, SYS_RES_MEMORY, rid, pci_info->regmap0); case xge_free_pci_info: /* Disable Bus Master */ pci_disable_busmaster(dev); /* Free pci_info_t */ lldev->pdev = NULL; xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t)); case xge_free_hal_device: /* Free device configuration struct and HAL device */ xge_os_free(NULL, hldev, sizeof(xge_hal_device_t)); case xge_free_terminate_hal_driver: /* Terminate HAL driver */ hal_driver_init_count = hal_driver_init_count - 1; if(!hal_driver_init_count) { xge_hal_driver_terminate(); } case xge_free_mutex: xge_mutex_destroy(lldev); } } /** * xge_detach * Detaches driver from the Kernel subsystem * * @dev Device Handle */ int xge_detach(device_t dev) { xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); if(lldev->in_detach == 0) { lldev->in_detach = 1; xge_stop(lldev); xge_resources_free(dev, xge_free_all); } return 0; } /** * xge_shutdown * To shutdown device before system shutdown * * @dev Device Handle */ int xge_shutdown(device_t dev) { xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev); xge_stop(lldev); return 0; } /** * xge_interface_setup * Setup interface * * @dev Device Handle * * Returns 0 on success, ENXIO/ENOMEM on failure */ int xge_interface_setup(device_t dev) { u8 mcaddr[ETHER_ADDR_LEN]; xge_hal_status_e status; xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev); struct ifnet *ifnetp; xge_hal_device_t *hldev = lldev->devh; /* Get the MAC address of the device */ status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr); if(status != XGE_HAL_OK) { xge_resources_free(dev, xge_free_terminate_hal_device); XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO); } /* Get interface ifnet structure for this Ether device */ ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER); if(ifnetp == NULL) { xge_resources_free(dev, xge_free_terminate_hal_device); XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM); } /* Initialize interface ifnet structure */ if_initname(ifnetp, device_get_name(dev), device_get_unit(dev)); ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU; ifnetp->if_baudrate = XGE_BAUDRATE; ifnetp->if_init = xge_init; ifnetp->if_softc = lldev; ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifnetp->if_ioctl = xge_ioctl; ifnetp->if_start = xge_send; /* TODO: Check and assign optimal value */ ifnetp->if_snd.ifq_maxlen = ifqmaxlen; ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM; if(lldev->enabled_tso) ifnetp->if_capabilities |= IFCAP_TSO4; if(lldev->enabled_lro) ifnetp->if_capabilities |= IFCAP_LRO; ifnetp->if_capenable = ifnetp->if_capabilities; /* Attach the interface */ ether_ifattach(ifnetp, mcaddr); ifsetup_out: return status; } /** * xge_callback_link_up * Callback for Link-up indication from HAL * * @userdata Per-adapter data */ void xge_callback_link_up(void 
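/*
 * Illustrative sketch only: the minimal legacy ifnet bring-up performed by
 * xge_interface_setup(), reduced to its core calls. The example_* handler
 * names and softc are placeholders, not real symbols in this driver.
 */
#if 0
static int
example_ifnet_setup(device_t dev, struct example_softc *sc, u8 *mac)
{
    struct ifnet *ifp;

    ifp = sc->ifp = if_alloc(IFT_ETHER);
    if(ifp == NULL)
        return ENOMEM;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init  = example_init;        /* placeholder handlers */
    ifp->if_ioctl = example_ioctl;
    ifp->if_start = example_start;
    ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
    ifp->if_capenable    = ifp->if_capabilities;

    /* ether_ifattach() fills in MTU/address length and publishes the MAC. */
    ether_ifattach(ifp, mac);
    return 0;
}
#endif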
*userdata) { xge_lldev_t *lldev = (xge_lldev_t *)userdata; struct ifnet *ifnetp = lldev->ifnetp; ifnetp->if_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifnetp, LINK_STATE_UP); } /** * xge_callback_link_down * Callback for Link-down indication from HAL * * @userdata Per-adapter data */ void xge_callback_link_down(void *userdata) { xge_lldev_t *lldev = (xge_lldev_t *)userdata; struct ifnet *ifnetp = lldev->ifnetp; ifnetp->if_flags |= IFF_DRV_OACTIVE; if_link_state_change(ifnetp, LINK_STATE_DOWN); } /** * xge_callback_crit_err * Callback for Critical error indication from HAL * * @userdata Per-adapter data * @type Event type (Enumerated hardware error) * @serr_data Hardware status */ void xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data) { xge_trace(XGE_ERR, "Critical Error"); xge_reset(userdata); } /** * xge_callback_event * Callback from HAL indicating that some event has been queued * * @item Queued event item */ void xge_callback_event(xge_queue_item_t *item) { xge_lldev_t *lldev = NULL; xge_hal_device_t *hldev = NULL; struct ifnet *ifnetp = NULL; hldev = item->context; lldev = xge_hal_device_private(hldev); ifnetp = lldev->ifnetp; switch((int)item->event_type) { case XGE_LL_EVENT_TRY_XMIT_AGAIN: if(lldev->initialized) { if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) { ifnetp->if_flags &= ~IFF_DRV_OACTIVE; } else { xge_queue_produce_context( xge_hal_device_queue(lldev->devh), XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh); } } break; case XGE_LL_EVENT_DEVICE_RESETTING: xge_reset(item->context); break; default: break; } } /** * xge_ifmedia_change * Media change driver callback * * @ifnetp Interface Handle * * Returns 0 if media is Ether else EINVAL */ int xge_ifmedia_change(struct ifnet *ifnetp) { xge_lldev_t *lldev = ifnetp->if_softc; struct ifmedia *ifmediap = &lldev->media; return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? 
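/*
 * Illustrative sketch only: what a link-up callback has to do for the stack --
 * clear the "output active" gate, report the state change, and kick the
 * transmit queue -- condensed from the callbacks above. example_start is a
 * placeholder if_start routine.
 */
#if 0
static void
example_link_up(struct ifnet *ifp)
{
    /* Let the transmit path run again... */
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    /* ...tell the stack (ifconfig status, routing daemons, etc.)... */
    if_link_state_change(ifp, LINK_STATE_UP);
    /* ...and push out anything that queued up while the link was down. */
    if(!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        example_start(ifp);
}
#endif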
EINVAL:0; } /** * xge_ifmedia_status * Media status driver callback * * @ifnetp Interface Handle * @ifmr Interface Media Settings */ void xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr) { xge_hal_status_e status; u64 regvalue; xge_lldev_t *lldev = ifnetp->if_softc; xge_hal_device_t *hldev = lldev->devh; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; status = xge_hal_mgmt_reg_read(hldev, 0, xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue); if(status != XGE_HAL_OK) { xge_trace(XGE_TRACE, "Getting adapter status failed"); goto _exit; } if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; if_link_state_change(ifnetp, LINK_STATE_UP); } else { if_link_state_change(ifnetp, LINK_STATE_DOWN); } _exit: return; } /** * xge_ioctl_stats * IOCTL to get statistics * * @lldev Per-adapter data * @ifreqp Interface request */ int xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp) { xge_hal_status_e status = XGE_HAL_OK; char cmd, mode; void *info = NULL; int retValue = EINVAL; cmd = fubyte(ifr_data_get_ptr(ifreqp)); if (cmd == -1) return (EFAULT); switch(cmd) { case XGE_QUERY_STATS: mtx_lock(&lldev->mtx_drv); status = xge_hal_stats_hw(lldev->devh, (xge_hal_stats_hw_info_t **)&info); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(info, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_stats_hw_info_t)) == 0) retValue = 0; } else { xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)", status); } break; case XGE_QUERY_PCICONF: info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t)); if(info != NULL) { mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_pci_config(lldev->devh, info, sizeof(xge_hal_pci_config_t)); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(info, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_pci_config_t)) == 0) retValue = 0; } else { xge_trace(XGE_ERR, "Getting PCI configuration failed (%d)", status); } xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t)); } break; case XGE_QUERY_DEVSTATS: info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t)); if(info != NULL) { mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_device_stats(lldev->devh, info, sizeof(xge_hal_stats_device_info_t)); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(info, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_stats_device_info_t)) == 0) retValue = 0; } else { xge_trace(XGE_ERR, "Getting device info failed (%d)", status); } xge_os_free(NULL, info, sizeof(xge_hal_stats_device_info_t)); } break; case XGE_QUERY_SWSTATS: info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t)); if(info != NULL) { mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_sw_stats(lldev->devh, info, sizeof(xge_hal_stats_sw_err_t)); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(info, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_stats_sw_err_t)) == 0) retValue = 0; } else { xge_trace(XGE_ERR, "Getting tcode statistics failed (%d)", status); } xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t)); } break; case XGE_QUERY_DRIVERSTATS: if(copyout(&lldev->driver_stats, ifr_data_get_ptr(ifreqp), sizeof(xge_driver_stats_t)) == 0) { retValue = 0; } else { xge_trace(XGE_ERR, "Copyout of driver statistics failed (%d)", status); } break; case XGE_READ_VERSION: info = xge_os_malloc(NULL, XGE_BUFFER_SIZE); if(info != NULL) { strcpy(info, XGE_DRIVER_VERSION); if(copyout(info, ifr_data_get_ptr(ifreqp), XGE_BUFFER_SIZE) == 0) 
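/*
 * Illustrative sketch only: the SIOCGPRIVATE_0-style protocol used by
 * xge_ioctl_stats() above -- the first byte of the user buffer selects a
 * sub-command and results come back via copyout(). The stats structure and
 * the command value here are invented for the example.
 */
#if 0
struct example_stats { u64 rx_pkts, tx_pkts; };
#define EXAMPLE_QUERY_STATS 1            /* hypothetical sub-command */

static int
example_priv_ioctl(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
    struct example_stats stats;
    int cmd;

    cmd = fubyte(ifr_data_get_ptr(ifreqp));
    if(cmd == -1)
        return EFAULT;
    if(cmd != EXAMPLE_QUERY_STATS)
        return ENOTTY;

    mtx_lock(&lldev->mtx_drv);
    stats.rx_pkts = 0;                   /* fill from hardware counters */
    stats.tx_pkts = 0;
    mtx_unlock(&lldev->mtx_drv);

    /* copyout() returns 0 on success or an errno to hand back to userland. */
    return copyout(&stats, ifr_data_get_ptr(ifreqp), sizeof(stats));
}
#endif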
retValue = 0; xge_os_free(NULL, info, XGE_BUFFER_SIZE); } break; case XGE_QUERY_DEVCONF: info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t)); if(info != NULL) { mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_device_config(lldev->devh, info, sizeof(xge_hal_device_config_t)); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(info, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_device_config_t)) == 0) retValue = 0; } else { xge_trace(XGE_ERR, "Getting devconfig failed (%d)", status); } xge_os_free(NULL, info, sizeof(xge_hal_device_config_t)); } break; case XGE_QUERY_BUFFER_MODE: if(copyout(&lldev->buffer_mode, ifr_data_get_ptr(ifreqp), sizeof(int)) == 0) retValue = 0; break; case XGE_SET_BUFFER_MODE_1: case XGE_SET_BUFFER_MODE_2: case XGE_SET_BUFFER_MODE_5: mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N'; if(copyout(&mode, ifr_data_get_ptr(ifreqp), sizeof(mode)) == 0) retValue = 0; break; default: xge_trace(XGE_TRACE, "Nothing is matching"); retValue = ENOTTY; break; } return retValue; } /** * xge_ioctl_registers * IOCTL to get registers * * @lldev Per-adapter data * @ifreqp Interface request */ int xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp) { xge_register_t tmpdata; xge_register_t *data; xge_hal_status_e status = XGE_HAL_OK; int retValue = EINVAL, offset = 0, index = 0; int error; u64 val64 = 0; error = copyin(ifr_data_get_ptr(ifreqp), &tmpdata, sizeof(tmpdata)); if (error != 0) return (error); data = &tmpdata; /* Reading a register */ if(strcmp(data->option, "-r") == 0) { data->value = 0x0000; mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset, &data->value); mtx_unlock(&lldev->mtx_drv); if(status == XGE_HAL_OK) { if(copyout(data, ifr_data_get_ptr(ifreqp), sizeof(xge_register_t)) == 0) retValue = 0; } } /* Writing to a register */ else if(strcmp(data->option, "-w") == 0) { mtx_lock(&lldev->mtx_drv); status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset, data->value); if(status == XGE_HAL_OK) { val64 = 0x0000; status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset, &val64); if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "Reading back updated register failed"); } else { if(val64 != data->value) { xge_trace(XGE_ERR, "Read and written register values mismatched"); } else retValue = 0; } } else { xge_trace(XGE_ERR, "Getting register value failed"); } mtx_unlock(&lldev->mtx_drv); } else { mtx_lock(&lldev->mtx_drv); for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG; index++, offset += 0x0008) { val64 = 0; status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64); if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "Getting register value failed"); break; } *((u64 *)((u64 *)data + index)) = val64; retValue = 0; } mtx_unlock(&lldev->mtx_drv); if(retValue == 0) { if(copyout(data, ifr_data_get_ptr(ifreqp), sizeof(xge_hal_pci_bar0_t)) != 0) { xge_trace(XGE_ERR, "Copyout of register values failed"); retValue = EINVAL; } } else { xge_trace(XGE_ERR, "Getting register values failed"); } } return retValue; } /** * xge_ioctl * Callback to control the device - Interface configuration * * @ifnetp Interface Handle * @command Device control command * @data Parameters associated with command (if any) */ int xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data) { struct ifreq *ifreqp = (struct ifreq *)data; xge_lldev_t *lldev = ifnetp->if_softc; struct ifmedia *ifmediap = &lldev->media; int retValue = 0, mask = 0; if(lldev->in_detach) { return retValue; } switch(command) { - /* Set/Get ifnet address */ - 
case SIOCSIFADDR: - case SIOCGIFADDR: - ether_ioctl(ifnetp, command, data); - break; - /* Set ifnet MTU */ case SIOCSIFMTU: retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu); break; /* Set ifnet flags */ case SIOCSIFFLAGS: if(ifnetp->if_flags & IFF_UP) { /* Link status is UP */ if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) { xge_init(lldev); } xge_disable_promisc(lldev); xge_enable_promisc(lldev); } else { /* Link status is DOWN */ /* If device is in running, make it down */ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) { xge_stop(lldev); } } break; /* Add/delete multicast address */ case SIOCADDMULTI: case SIOCDELMULTI: if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) { xge_setmulti(lldev); } break; /* Set/Get net media */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command); break; /* Set capabilities */ case SIOCSIFCAP: mtx_lock(&lldev->mtx_drv); mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable; if(mask & IFCAP_TXCSUM) { if(ifnetp->if_capenable & IFCAP_TXCSUM) { ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); ifnetp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP | CSUM_TSO); } else { ifnetp->if_capenable |= IFCAP_TXCSUM; ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP); } } if(mask & IFCAP_TSO4) { if(ifnetp->if_capenable & IFCAP_TSO4) { ifnetp->if_capenable &= ~IFCAP_TSO4; ifnetp->if_hwassist &= ~CSUM_TSO; xge_os_printf("%s: TSO Disabled", device_get_nameunit(lldev->device)); } else if(ifnetp->if_capenable & IFCAP_TXCSUM) { ifnetp->if_capenable |= IFCAP_TSO4; ifnetp->if_hwassist |= CSUM_TSO; xge_os_printf("%s: TSO Enabled", device_get_nameunit(lldev->device)); } } mtx_unlock(&lldev->mtx_drv); break; /* Custom IOCTL 0 */ case SIOCGPRIVATE_0: retValue = xge_ioctl_stats(lldev, ifreqp); break; /* Custom IOCTL 1 */ case SIOCGPRIVATE_1: retValue = xge_ioctl_registers(lldev, ifreqp); break; default: - retValue = EINVAL; + retValue = ether_ioctl(ifnetp, command, data); break; } return retValue; } /** * xge_init * Initialize the interface * * @plldev Per-adapter Data */ void xge_init(void *plldev) { xge_lldev_t *lldev = (xge_lldev_t *)plldev; mtx_lock(&lldev->mtx_drv); xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t)); xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); mtx_unlock(&lldev->mtx_drv); } /** * xge_device_init * Initialize the interface (called by holding lock) * * @pdevin Per-adapter Data */ void xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { struct ifnet *ifnetp = lldev->ifnetp; xge_hal_device_t *hldev = lldev->devh; struct ifaddr *ifaddrp; unsigned char *macaddr; struct sockaddr_dl *sockaddrp; int status = XGE_HAL_OK; mtx_assert((&lldev->mtx_drv), MA_OWNED); /* If device is in running state, initializing is not required */ if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) return; /* Initializing timer */ callout_init(&lldev->timer, 1); xge_trace(XGE_TRACE, "Set MTU size"); status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu); if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "Setting MTU in HAL device failed"); goto _exit; } /* Enable HAL device */ xge_hal_device_enable(hldev); /* Get MAC address and update in HAL */ ifaddrp = ifnetp->if_addr; sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr; sockaddrp->sdl_type = IFT_ETHER; sockaddrp->sdl_alen = ifnetp->if_addrlen; macaddr = LLADDR(sockaddrp); xge_trace(XGE_TRACE, "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3), *(macaddr + 4), *(macaddr + 5)); status = xge_hal_device_macaddr_set(hldev, 0, macaddr); 
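/*
 * Illustrative sketch only: the ioctl shape the hunk above moves to. Routing
 * unhandled commands to ether_ioctl() covers SIOCSIFADDR/SIOCGIFADDR (and the
 * other common Ethernet cases) and returns EINVAL for anything unknown, so
 * explicit cases for the address ioctls can be dropped. example_ioctl is a
 * placeholder name.
 */
#if 0
static int
example_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
    struct ifreq *ifr = (struct ifreq *)data;
    xge_lldev_t *lldev = ifp->if_softc;
    int error = 0;

    switch(command) {
    case SIOCSIFMTU:
        error = xge_change_mtu(lldev, ifr->ifr_mtu);
        break;
    default:
        /* Everything else, including the interface-address ioctls. */
        error = ether_ioctl(ifp, command, data);
        break;
    }
    return error;
}
#endif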
if(status != XGE_HAL_OK) xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status); /* Opening channels */ mtx_unlock(&lldev->mtx_drv); status = xge_channel_open(lldev, option); mtx_lock(&lldev->mtx_drv); if(status != XGE_HAL_OK) goto _exit; /* Set appropriate flags */ ifnetp->if_drv_flags |= IFF_DRV_RUNNING; ifnetp->if_flags &= ~IFF_DRV_OACTIVE; /* Checksum capability */ ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ? (CSUM_TCP | CSUM_UDP) : 0; if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4)) ifnetp->if_hwassist |= CSUM_TSO; /* Enable interrupts */ xge_hal_device_intr_enable(hldev); callout_reset(&lldev->timer, 10*hz, xge_timer, lldev); /* Enable promiscuous mode if requested */ xge_trace(XGE_TRACE, "If opted, enable promiscuous mode"); xge_enable_promisc(lldev); /* Device is initialized */ lldev->initialized = 1; xge_os_mdelay(1000); _exit: return; } /** * xge_timer * Timer timeout function to handle link status * * @devp Per-adapter Data */ void xge_timer(void *devp) { xge_lldev_t *lldev = (xge_lldev_t *)devp; xge_hal_device_t *hldev = lldev->devh; /* Poll for changes */ xge_hal_device_poll(hldev); /* Reset timer */ callout_reset(&lldev->timer, hz, xge_timer, lldev); return; } /** * xge_stop * De-activate the interface * * @lldev Per-adapter Data */ void xge_stop(xge_lldev_t *lldev) { mtx_lock(&lldev->mtx_drv); xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); mtx_unlock(&lldev->mtx_drv); } /** * xge_isr_filter * ISR filter function - to filter interrupts from other devices (shared) * * @handle Per-adapter Data * * Returns * FILTER_STRAY if interrupt is from other device * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device */ int xge_isr_filter(void *handle) { xge_lldev_t *lldev = (xge_lldev_t *)handle; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0); u16 retValue = FILTER_STRAY; u64 val64 = 0; XGE_DRV_STATS(isr_filter); val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0, &bar0->general_int_status); retValue = (!val64) ? 
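/*
 * Illustrative sketch only: the callout life cycle behind lldev->timer as used
 * above -- initialize once, re-arm from the handler, cancel on stop. Only
 * example_tick is a placeholder name; the callout_* KPIs are the real ones.
 */
#if 0
static void
example_tick(void *arg)
{
    xge_lldev_t *lldev = arg;

    xge_hal_device_poll(lldev->devh);                         /* link/alarm housekeeping */
    callout_reset(&lldev->timer, hz, example_tick, lldev);    /* re-arm, ~1 second */
}

/*
 * Init:  callout_init(&lldev->timer, 1);
 *        callout_reset(&lldev->timer, hz, example_tick, lldev);
 * Stop:  callout_stop(&lldev->timer);  (callout_drain() before detach)
 */
#endif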
FILTER_STRAY : FILTER_SCHEDULE_THREAD; return retValue; } /** * xge_isr_line * Interrupt service routine for Line interrupts * * @plldev Per-adapter Data */ void xge_isr_line(void *plldev) { xge_hal_status_e status; xge_lldev_t *lldev = (xge_lldev_t *)plldev; xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; struct ifnet *ifnetp = lldev->ifnetp; XGE_DRV_STATS(isr_line); if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) { status = xge_hal_device_handle_irq(hldev); if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd))) xge_send(ifnetp); } } /* * xge_isr_msi * ISR for Message signaled interrupts */ void xge_isr_msi(void *plldev) { xge_lldev_t *lldev = (xge_lldev_t *)plldev; XGE_DRV_STATS(isr_msi); xge_hal_device_continue_irq(lldev->devh); } /** * xge_rx_open * Initiate and open all Rx channels * * @qid Ring Index * @lldev Per-adapter Data * @rflag Channel open/close/reopen flag * * Returns 0 or Error Number */ int xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag) { u64 adapter_status = 0x0; xge_hal_status_e status = XGE_HAL_FAIL; xge_hal_channel_attr_t attr = { .post_qid = qid, .compl_qid = 0, .callback = xge_rx_compl, .per_dtr_space = sizeof(xge_rx_priv_t), .flags = 0, .type = XGE_HAL_CHANNEL_TYPE_RING, .userdata = lldev, .dtr_init = xge_rx_initial_replenish, .dtr_term = xge_rx_term }; /* If device is not ready, return */ status = xge_hal_device_status(lldev->devh, &adapter_status); if(status != XGE_HAL_OK) { xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status); XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL); } else { status = xge_hal_channel_open(lldev->devh, &attr, &lldev->ring_channel[qid], rflag); } _exit: return status; } /** * xge_tx_open * Initialize and open all Tx channels * * @lldev Per-adapter Data * @tflag Channel open/close/reopen flag * * Returns 0 or Error Number */ int xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag) { xge_hal_status_e status = XGE_HAL_FAIL; u64 adapter_status = 0x0; int qindex, index; xge_hal_channel_attr_t attr = { .compl_qid = 0, .callback = xge_tx_compl, .per_dtr_space = sizeof(xge_tx_priv_t), .flags = 0, .type = XGE_HAL_CHANNEL_TYPE_FIFO, .userdata = lldev, .dtr_init = xge_tx_initial_replenish, .dtr_term = xge_tx_term }; /* If device is not ready, return */ status = xge_hal_device_status(lldev->devh, &adapter_status); if(status != XGE_HAL_OK) { xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status); XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL); } for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { attr.post_qid = qindex, status = xge_hal_channel_open(lldev->devh, &attr, &lldev->fifo_channel[qindex], tflag); if(status != XGE_HAL_OK) { for(index = 0; index < qindex; index++) xge_hal_channel_close(lldev->fifo_channel[index], tflag); } } _exit: return status; } /** * xge_enable_msi * Enables MSI * * @lldev Per-adapter Data */ void xge_enable_msi(xge_lldev_t *lldev) { xge_list_t *item = NULL; xge_hal_device_t *hldev = lldev->devh; xge_hal_channel_t *channel = NULL; u16 offset = 0, val16 = 0; xge_os_pci_read16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16); /* Update msi_data */ offset = (val16 & 0x80) ? 
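/*
 * Illustrative sketch only: the split that xge_isr_filter()/xge_isr_line()
 * implement. The filter runs in primary interrupt context and only decides
 * whether the (possibly shared) interrupt is ours; the threaded handler does
 * the heavy lifting. example_* names and the status read are placeholders.
 */
#if 0
static int
example_intr_filter(void *arg)
{
    xge_lldev_t *lldev = arg;

    if(example_read_int_status(lldev) == 0)
        return FILTER_STRAY;             /* someone else's interrupt */
    return FILTER_SCHEDULE_THREAD;       /* run example_intr() below */
}

static void
example_intr(void *arg)
{
    xge_lldev_t *lldev = arg;

    example_handle_completions(lldev);   /* Rx/Tx completion processing */
}

/*
 * Registered as a pair:
 *     bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *         example_intr_filter, example_intr, lldev, &cookie);
 */
#endif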
0x4c : 0x48; xge_os_pci_read16(lldev->pdev, NULL, offset, &val16); if(val16 & 0x1) val16 &= 0xfffe; else val16 |= 0x1; xge_os_pci_write16(lldev->pdev, NULL, offset, val16); /* Update msi_control */ xge_os_pci_read16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16); val16 |= 0x10; xge_os_pci_write16(lldev->pdev, NULL, xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16); /* Set TxMAT and RxMAT registers with MSI */ xge_list_for_each(item, &hldev->free_channels) { channel = xge_container_of(item, xge_hal_channel_t, item); xge_hal_channel_msi_set(channel, 1, (u32)val16); } } /** * xge_channel_open * Open both Tx and Rx channels * * @lldev Per-adapter Data * @option Channel reopen option */ int xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { xge_lro_entry_t *lro_session = NULL; xge_hal_status_e status = XGE_HAL_OK; int index = 0, index2 = 0; if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) { xge_msi_info_restore(lldev); xge_enable_msi(lldev); } _exit2: status = xge_create_dma_tags(lldev->device); if(status != XGE_HAL_OK) XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status); /* Open ring (Rx) channel */ for(index = 0; index < XGE_RING_COUNT; index++) { status = xge_rx_open(index, lldev, option); if(status != XGE_HAL_OK) { /* * DMA mapping fails in the unpatched Kernel which can't * allocate contiguous memory for Jumbo frames. * Try using 5 buffer mode. */ if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) && (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) > MJUMPAGESIZE)) { /* Close so far opened channels */ for(index2 = 0; index2 < index; index2++) { xge_hal_channel_close(lldev->ring_channel[index2], option); } /* Destroy DMA tags intended to use for 1 buffer mode */ if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map)) { xge_trace(XGE_ERR, "Rx extra DMA map destroy failed"); } if(bus_dma_tag_destroy(lldev->dma_tag_rx)) xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); if(bus_dma_tag_destroy(lldev->dma_tag_tx)) xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); /* Switch to 5 buffer mode */ lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5; xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu); /* Restart init */ goto _exit2; } else { XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1, status); } } } if(lldev->enabled_lro) { SLIST_INIT(&lldev->lro_free); SLIST_INIT(&lldev->lro_active); lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES; for(index = 0; index < lldev->lro_num; index++) { lro_session = (xge_lro_entry_t *) xge_os_malloc(NULL, sizeof(xge_lro_entry_t)); if(lro_session == NULL) { lldev->lro_num = index; break; } SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next); } } /* Open FIFO (Tx) channel */ status = xge_tx_open(lldev, option); if(status != XGE_HAL_OK) XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status); goto _exit; _exit1: /* * Opening Rx channel(s) failed (index is ) or * Initialization of LRO failed (index is XGE_RING_COUNT) * Opening Tx channel failed (index is XGE_RING_COUNT) */ for(index2 = 0; index2 < index; index2++) xge_hal_channel_close(lldev->ring_channel[index2], option); _exit: return status; } /** * xge_channel_close * Close both Tx and Rx channels * * @lldev Per-adapter Data * @option Channel reopen option * */ void xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { int qindex = 0; DELAY(1000 * 1000); /* Close FIFO (Tx) channel */ for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) xge_hal_channel_close(lldev->fifo_channel[qindex], option); /* Close 
Ring (Rx) channels */ for(qindex = 0; qindex < XGE_RING_COUNT; qindex++) xge_hal_channel_close(lldev->ring_channel[qindex], option); if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map)) xge_trace(XGE_ERR, "Rx extra map destroy failed"); if(bus_dma_tag_destroy(lldev->dma_tag_rx)) xge_trace(XGE_ERR, "Rx DMA tag destroy failed"); if(bus_dma_tag_destroy(lldev->dma_tag_tx)) xge_trace(XGE_ERR, "Tx DMA tag destroy failed"); } /** * dmamap_cb * DMA map callback * * @arg Parameter passed from dmamap * @segs Segments * @nseg Number of segments * @error Error */ void dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if(!error) { *(bus_addr_t *) arg = segs->ds_addr; } } /** * xge_reset * Device Reset * * @lldev Per-adapter Data */ void xge_reset(xge_lldev_t *lldev) { xge_trace(XGE_TRACE, "Reseting the chip"); /* If the device is not initialized, return */ if(lldev->initialized) { mtx_lock(&lldev->mtx_drv); xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL); xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL); mtx_unlock(&lldev->mtx_drv); } return; } /** * xge_setmulti * Set an address as a multicast address * * @lldev Per-adapter Data */ void xge_setmulti(xge_lldev_t *lldev) { struct ifmultiaddr *ifma; u8 *lladdr; xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh; struct ifnet *ifnetp = lldev->ifnetp; int index = 0; int offset = 1; int table_size = 47; xge_hal_status_e status = XGE_HAL_OK; u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) { status = xge_hal_device_mcast_enable(hldev); lldev->all_multicast = 1; } else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) { status = xge_hal_device_mcast_disable(hldev); lldev->all_multicast = 0; } if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "Enabling/disabling multicast failed"); goto _exit; } /* Updating address list */ if_maddr_rlock(ifnetp); index = 0; TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) { if(ifma->ifma_addr->sa_family != AF_LINK) { continue; } lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); index += 1; } if_maddr_runlock(ifnetp); if((!lldev->all_multicast) && (index)) { lldev->macaddr_count = (index + 1); if(lldev->macaddr_count > table_size) { goto _exit; } /* Clear old addresses */ for(index = 0; index < 48; index++) { xge_hal_device_macaddr_set(hldev, (offset + index), initial_addr); } } /* Add new addresses */ if_maddr_rlock(ifnetp); index = 0; TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) { if(ifma->ifma_addr->sa_family != AF_LINK) { continue; } lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); xge_hal_device_macaddr_set(hldev, (offset + index), lladdr); index += 1; } if_maddr_runlock(ifnetp); _exit: return; } /** * xge_enable_promisc * Enable Promiscuous Mode * * @lldev Per-adapter Data */ void xge_enable_promisc(xge_lldev_t *lldev) { struct ifnet *ifnetp = lldev->ifnetp; xge_hal_device_t *hldev = lldev->devh; xge_hal_pci_bar0_t *bar0 = NULL; u64 val64 = 0; bar0 = (xge_hal_pci_bar0_t *) hldev->bar0; if(ifnetp->if_flags & IFF_PROMISC) { xge_hal_device_promisc_enable(lldev->devh); /* * When operating in promiscuous mode, don't strip the VLAN tag */ val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0, &bar0->rx_pa_cfg); val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1); val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0); xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64, &bar0->rx_pa_cfg); xge_trace(XGE_TRACE, "Promiscuous mode ON"); } } /** * xge_disable_promisc * Disable 
Promiscuous Mode * * @lldev Per-adapter Data */ void xge_disable_promisc(xge_lldev_t *lldev) { xge_hal_device_t *hldev = lldev->devh; xge_hal_pci_bar0_t *bar0 = NULL; u64 val64 = 0; bar0 = (xge_hal_pci_bar0_t *) hldev->bar0; xge_hal_device_promisc_disable(lldev->devh); /* * Strip VLAN tag when operating in non-promiscuous mode */ val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0, &bar0->rx_pa_cfg); val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1); val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1); xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64, &bar0->rx_pa_cfg); xge_trace(XGE_TRACE, "Promiscuous mode OFF"); } /** * xge_change_mtu * Change interface MTU to a requested valid size * * @lldev Per-adapter Data * @NewMtu Requested MTU * * Returns 0 or Error Number */ int xge_change_mtu(xge_lldev_t *lldev, int new_mtu) { int status = XGE_HAL_OK; /* Check requested MTU size for boundary */ if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) { XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL); } lldev->mtu = new_mtu; xge_confirm_changes(lldev, XGE_SET_MTU); _exit: return status; } /** * xge_device_stop * * Common code for both stop and part of reset. Disables device, interrupts and * closes channels * * @dev Device Handle * @option Channel normal/reset option */ void xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option) { xge_hal_device_t *hldev = lldev->devh; struct ifnet *ifnetp = lldev->ifnetp; u64 val64 = 0; mtx_assert((&lldev->mtx_drv), MA_OWNED); /* If device is not in "Running" state, return */ if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) goto _exit; /* Set appropriate flags */ ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* Stop timer */ callout_stop(&lldev->timer); /* Disable interrupts */ xge_hal_device_intr_disable(hldev); mtx_unlock(&lldev->mtx_drv); xge_queue_flush(xge_hal_device_queue(lldev->devh)); mtx_lock(&lldev->mtx_drv); /* Disable HAL device */ if(xge_hal_device_disable(hldev) != XGE_HAL_OK) { xge_trace(XGE_ERR, "Disabling HAL device failed"); xge_hal_device_status(hldev, &val64); xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64); } /* Close Tx and Rx channels */ xge_channel_close(lldev, option); /* Reset HAL device */ xge_hal_device_reset(hldev); xge_os_mdelay(1000); lldev->initialized = 0; if_link_state_change(ifnetp, LINK_STATE_DOWN); _exit: return; } /** * xge_set_mbuf_cflags * set checksum flag for the mbuf * * @pkt Packet */ void xge_set_mbuf_cflags(mbuf_t pkt) { pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED; pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID; pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); pkt->m_pkthdr.csum_data = htons(0xffff); } /** * xge_lro_flush_sessions * Flush LRO session and send accumulated LRO packet to upper layer * * @lldev Per-adapter Data */ void xge_lro_flush_sessions(xge_lldev_t *lldev) { xge_lro_entry_t *lro_session = NULL; while(!SLIST_EMPTY(&lldev->lro_active)) { lro_session = SLIST_FIRST(&lldev->lro_active); SLIST_REMOVE_HEAD(&lldev->lro_active, next); xge_lro_flush(lldev, lro_session); } } /** * xge_lro_flush * Flush LRO session. 
Send accumulated LRO packet to upper layer * * @lldev Per-adapter Data * @lro LRO session to be flushed */ static void xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session) { struct ip *header_ip; struct tcphdr *header_tcp; u32 *ptr; if(lro_session->append_cnt) { header_ip = lro_session->lro_header_ip; header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN); lro_session->m_head->m_pkthdr.len = lro_session->len; header_tcp = (struct tcphdr *)(header_ip + 1); header_tcp->th_ack = lro_session->ack_seq; header_tcp->th_win = lro_session->window; if(lro_session->timestamp) { ptr = (u32 *)(header_tcp + 1); ptr[1] = htonl(lro_session->tsval); ptr[2] = lro_session->tsecr; } } (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head); lro_session->m_head = NULL; lro_session->timestamp = 0; lro_session->append_cnt = 0; SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next); } /** * xge_lro_accumulate * Accumulate packets to form a large LRO packet based on various conditions * * @lldev Per-adapter Data * @m_head Current Packet * * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure) */ static int xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head) { struct ether_header *header_ethernet; struct ip *header_ip; struct tcphdr *header_tcp; u32 seq, *ptr; struct mbuf *buffer_next, *buffer_tail; xge_lro_entry_t *lro_session; xge_hal_status_e status = XGE_HAL_FAIL; int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options; int trim; /* Get Ethernet header */ header_ethernet = mtod(m_head, struct ether_header *); /* Return if it is not IP packet */ if(header_ethernet->ether_type != htons(ETHERTYPE_IP)) goto _exit; /* Get IP header */ header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ? (struct ip *)(header_ethernet + 1) : mtod(m_head->m_next, struct ip *); /* Return if it is not TCP packet */ if(header_ip->ip_p != IPPROTO_TCP) goto _exit; /* Return if packet has options */ if((header_ip->ip_hl << 2) != sizeof(*header_ip)) goto _exit; /* Return if packet is fragmented */ if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK)) goto _exit; /* Get TCP header */ header_tcp = (struct tcphdr *)(header_ip + 1); /* Return if not ACK or PUSH */ if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0) goto _exit; /* Only timestamp option is handled */ tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp); tcp_hdr_len = sizeof(*header_tcp) + tcp_options; ptr = (u32 *)(header_tcp + 1); if(tcp_options != 0) { if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) || (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) { goto _exit; } } /* Total length of packet (IP) */ ip_len = ntohs(header_ip->ip_len); /* TCP data size */ tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip); /* If the frame is padded, trim it */ tot_len = m_head->m_pkthdr.len; trim = tot_len - (ip_len + ETHER_HDR_LEN); if(trim != 0) { if(trim < 0) goto _exit; m_adj(m_head, -trim); tot_len = m_head->m_pkthdr.len; } buffer_next = m_head; buffer_tail = NULL; while(buffer_next != NULL) { buffer_tail = buffer_next; buffer_next = buffer_tail->m_next; } /* Total size of only headers */ hlen = ip_len + ETHER_HDR_LEN - tcp_data_len; /* Get sequence number */ seq = ntohl(header_tcp->th_seq); SLIST_FOREACH(lro_session, &lldev->lro_active, next) { if(lro_session->source_port == header_tcp->th_sport && lro_session->dest_port == header_tcp->th_dport && lro_session->source_ip == header_ip->ip_src.s_addr && lro_session->dest_ip == header_ip->ip_dst.s_addr) { /* 
Unmatched sequence number, flush LRO session */ if(__predict_false(seq != lro_session->next_seq)) { SLIST_REMOVE(&lldev->lro_active, lro_session, xge_lro_entry_t, next); xge_lro_flush(lldev, lro_session); goto _exit; } /* Handle timestamp option */ if(tcp_options) { u32 tsval = ntohl(*(ptr + 1)); if(__predict_false(lro_session->tsval > tsval || *(ptr + 2) == 0)) { goto _exit; } lro_session->tsval = tsval; lro_session->tsecr = *(ptr + 2); } lro_session->next_seq += tcp_data_len; lro_session->ack_seq = header_tcp->th_ack; lro_session->window = header_tcp->th_win; /* If TCP data/payload is of 0 size, free mbuf */ if(tcp_data_len == 0) { m_freem(m_head); status = XGE_HAL_OK; goto _exit; } lro_session->append_cnt++; lro_session->len += tcp_data_len; /* Adjust mbuf so that m_data points to payload than headers */ m_adj(m_head, hlen); /* Append this packet to LRO accumulated packet */ lro_session->m_tail->m_next = m_head; lro_session->m_tail = buffer_tail; /* Flush if LRO packet is exceeding maximum size */ if(lro_session->len > (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) { SLIST_REMOVE(&lldev->lro_active, lro_session, xge_lro_entry_t, next); xge_lro_flush(lldev, lro_session); } status = XGE_HAL_OK; goto _exit; } } if(SLIST_EMPTY(&lldev->lro_free)) goto _exit; /* Start a new LRO session */ lro_session = SLIST_FIRST(&lldev->lro_free); SLIST_REMOVE_HEAD(&lldev->lro_free, next); SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next); lro_session->source_port = header_tcp->th_sport; lro_session->dest_port = header_tcp->th_dport; lro_session->source_ip = header_ip->ip_src.s_addr; lro_session->dest_ip = header_ip->ip_dst.s_addr; lro_session->next_seq = seq + tcp_data_len; lro_session->mss = tcp_data_len; lro_session->ack_seq = header_tcp->th_ack; lro_session->window = header_tcp->th_win; lro_session->lro_header_ip = header_ip; /* Handle timestamp option */ if(tcp_options) { lro_session->timestamp = 1; lro_session->tsval = ntohl(*(ptr + 1)); lro_session->tsecr = *(ptr + 2); } lro_session->len = tot_len; lro_session->m_head = m_head; lro_session->m_tail = buffer_tail; status = XGE_HAL_OK; _exit: return status; } /** * xge_accumulate_large_rx * Accumulate packets to form a large LRO packet based on various conditions * * @lldev Per-adapter Data * @pkt Current packet * @pkt_length Packet Length * @rxd_priv Rx Descriptor Private Data */ void xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length, xge_rx_priv_t *rxd_priv) { if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD); (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt); } } /** * xge_rx_compl * If the interrupt is due to received frame (Rx completion), send it up * * @channelh Ring Channel Handle * @dtr Current Descriptor * @t_code Transfer Code indicating success or error * @userdata Per-adapter Data * * Returns XGE_HAL_OK or HAL error enums */ xge_hal_status_e xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, void *userdata) { struct ifnet *ifnetp; xge_rx_priv_t *rxd_priv = NULL; mbuf_t mbuf_up = NULL; xge_hal_status_e status = XGE_HAL_OK; xge_hal_dtr_info_t ext_info; int index; u16 vlan_tag; /*get the user data portion*/ xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(!lldev) { XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL); } XGE_DRV_STATS(rx_completions); /* get the interface pointer */ ifnetp = lldev->ifnetp; do { XGE_DRV_STATS(rx_desc_compl); if(!(ifnetp->if_drv_flags & 
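/*
 * Illustrative sketch only: the single TCP-option layout the software LRO path
 * above is willing to merge -- either no options at all, or exactly the
 * RFC 1323 timestamp in its "appendix A" padding (NOP, NOP, TIMESTAMP, len 10).
 * The helper name is a placeholder.
 */
#if 0
static int
example_lro_option_ok(struct tcphdr *th)
{
    int optlen = (th->th_off << 2) - sizeof(*th);
    u32 *opt = (u32 *)(th + 1);

    if(optlen == 0)
        return 1;                        /* no options: always mergeable */
    if(optlen != TCPOLEN_TSTAMP_APPA)
        return 0;                        /* anything else: hand to the stack */
    return (*opt == htonl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
        TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP));
}
#endif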
IFF_DRV_RUNNING)) { status = XGE_HAL_FAIL; goto _exit; } if(t_code) { xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code); XGE_DRV_STATS(rx_tcode); xge_hal_device_handle_tcode(channelh, dtr, t_code); xge_hal_ring_dtr_post(channelh,dtr); continue; } /* Get the private data for this descriptor*/ rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtr); if(!rxd_priv) { XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit, XGE_HAL_FAIL); } /* * Prepare one buffer to send it to upper layer -- since the upper * layer frees the buffer do not use rxd_priv->buffer. Meanwhile * prepare a new buffer, do mapping, use it in the current * descriptor and post descriptor back to ring channel */ mbuf_up = rxd_priv->bufferArray[0]; /* Gets details of mbuf i.e., packet length */ xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv); status = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ? xge_get_buf(dtr, rxd_priv, lldev, 0) : xge_get_buf_3b_5b(dtr, rxd_priv, lldev); if(status != XGE_HAL_OK) { xge_trace(XGE_ERR, "No memory"); XGE_DRV_STATS(rx_no_buf); /* * Unable to allocate buffer. Instead of discarding, post * descriptor back to channel for future processing of same * packet. */ xge_hal_ring_dtr_post(channelh, dtr); continue; } /* Get the extended information */ xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info); /* * As we have allocated a new mbuf for this descriptor, post this * descriptor with new mbuf back to ring channel */ vlan_tag = ext_info.vlan; xge_hal_ring_dtr_post(channelh, dtr); if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) && (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) && (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) && (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) { /* set Checksum Flag */ xge_set_mbuf_cflags(mbuf_up); if(lldev->enabled_lro) { xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len, rxd_priv); } else { /* Post-Read sync for buffers*/ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD); } (*ifnetp->if_input)(ifnetp, mbuf_up); } } else { /* * Packet with erroneous checksum , let the upper layer deal * with it */ /* Post-Read sync for buffers*/ for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD); } if(vlan_tag) { mbuf_up->m_pkthdr.ether_vtag = vlan_tag; mbuf_up->m_flags |= M_VLANTAG; } if(lldev->enabled_lro) xge_lro_flush_sessions(lldev); (*ifnetp->if_input)(ifnetp, mbuf_up); } } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) == XGE_HAL_OK); if(lldev->enabled_lro) xge_lro_flush_sessions(lldev); _exit: return status; } /** * xge_ring_dtr_get * Get descriptors * * @mbuf_up Packet to send up * @channelh Ring Channel Handle * @dtr Descriptor * @lldev Per-adapter Data * @rxd_priv Rx Descriptor Private Data * * Returns XGE_HAL_OK or HAL error enums */ int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr, xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv) { mbuf_t m; int pkt_length[5]={0,0}, pkt_len=0; dma_addr_t dma_data[5]; int index; m = mbuf_up; pkt_len = 0; if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { xge_os_memzero(pkt_length, sizeof(pkt_length)); /* * Retrieve data of interest from the completed descriptor -- This * returns the packet length */ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length); } else { 
xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length); } for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { m->m_len = pkt_length[index]; if(index < (lldev->rxd_mbuf_cnt-1)) { m->m_next = rxd_priv->bufferArray[index + 1]; m = m->m_next; } else { m->m_next = NULL; } pkt_len+=pkt_length[index]; } /* * Since 2 buffer mode is an exceptional case where data is in 3rd * buffer but not in 2nd buffer */ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) { m->m_len = pkt_length[2]; pkt_len+=pkt_length[2]; } /* * Update length of newly created buffer to be sent up with packet * length */ mbuf_up->m_pkthdr.len = pkt_len; } else { /* * Retrieve data of interest from the completed descriptor -- This * returns the packet length */ xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]); /* * Update length of newly created buffer to be sent up with packet * length */ mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0]; } return XGE_HAL_OK; } /** * xge_flush_txds * Flush Tx descriptors * * @channelh Channel handle */ static void inline xge_flush_txds(xge_hal_channel_h channelh) { xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); xge_hal_dtr_h tx_dtr; xge_tx_priv_t *tx_priv; u8 t_code; while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code) == XGE_HAL_OK) { XGE_DRV_STATS(tx_desc_compl); if(t_code) { xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code); XGE_DRV_STATS(tx_tcode); xge_hal_device_handle_tcode(channelh, tx_dtr, t_code); } tx_priv = xge_hal_fifo_dtr_private(tx_dtr); bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map); m_freem(tx_priv->buffer); tx_priv->buffer = NULL; xge_hal_fifo_dtr_free(channelh, tx_dtr); } } /** * xge_send * Transmit function * * @ifnetp Interface Handle */ void xge_send(struct ifnet *ifnetp) { int qindex = 0; xge_lldev_t *lldev = ifnetp->if_softc; for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) { if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) { XGE_DRV_STATS(tx_lock_fail); break; } xge_send_locked(ifnetp, qindex); mtx_unlock(&lldev->mtx_tx[qindex]); } } static void inline xge_send_locked(struct ifnet *ifnetp, int qindex) { xge_hal_dtr_h dtr; static bus_dma_segment_t segs[XGE_MAX_SEGS]; xge_hal_status_e status; unsigned int max_fragments; xge_lldev_t *lldev = ifnetp->if_softc; xge_hal_channel_h channelh = lldev->fifo_channel[qindex]; mbuf_t m_head = NULL; mbuf_t m_buf = NULL; xge_tx_priv_t *ll_tx_priv = NULL; register unsigned int count = 0; unsigned int nsegs = 0; u16 vlan_tag; max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags; /* If device is not initialized, return */ if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))) return; XGE_DRV_STATS(tx_calls); /* * This loop will be executed for each packet in the kernel maintained * queue -- each packet can be with fragments as an mbuf chain */ for(;;) { IF_DEQUEUE(&ifnetp->if_snd, m_head); if (m_head == NULL) { ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE); return; } for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) { if(m_buf->m_len) count += 1; } if(count >= max_fragments) { m_buf = m_defrag(m_head, M_NOWAIT); if(m_buf != NULL) m_head = m_buf; XGE_DRV_STATS(tx_defrag); } /* Reserve descriptors */ status = xge_hal_fifo_dtr_reserve(channelh, &dtr); if(status != XGE_HAL_OK) { XGE_DRV_STATS(tx_no_txd); xge_flush_txds(channelh); break; } vlan_tag = (m_head->m_flags & M_VLANTAG) ? 
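/*
 * Illustrative sketch only: the transmit-path contract that xge_send_locked()
 * follows -- dequeue, bound the fragment count with m_defrag(), requeue and
 * raise IFF_DRV_OACTIVE when descriptors run out, and tap BPF on success.
 * example_encap(), example_frag_count() and EXAMPLE_MAX_SEGS are placeholders
 * for the descriptor work.
 */
#if 0
static void
example_start_locked(struct ifnet *ifp)
{
    xge_lldev_t *lldev = ifp->if_softc;
    mbuf_t m_head, m_tmp;

    for(;;) {
        IF_DEQUEUE(&ifp->if_snd, m_head);
        if(m_head == NULL)
            break;
        if(example_frag_count(m_head) >= EXAMPLE_MAX_SEGS) {
            m_tmp = m_defrag(m_head, M_NOWAIT);
            if(m_tmp != NULL)
                m_head = m_tmp;          /* else try the original chain */
        }
        if(example_encap(lldev, m_head) != 0) {
            /* Out of descriptors: put the packet back and stop for now. */
            IF_PREPEND(&ifp->if_snd, m_head);
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        ETHER_BPF_MTAP(ifp, m_head);     /* copy to tcpdump listeners */
    }
}
#endif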
m_head->m_pkthdr.ether_vtag : 0; xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag); /* Update Tx private structure for this descriptor */ ll_tx_priv = xge_hal_fifo_dtr_private(dtr); ll_tx_priv->buffer = m_head; /* * Do mapping -- Required DMA tag has been created in xge_init * function and DMA maps have already been created in the * xgell_tx_replenish function. * Returns number of segments through nsegs */ if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx, ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) { xge_trace(XGE_TRACE, "DMA map load failed"); XGE_DRV_STATS(tx_map_fail); break; } if(lldev->driver_stats.tx_max_frags < nsegs) lldev->driver_stats.tx_max_frags = nsegs; /* Set descriptor buffer for header and each fragment/segment */ count = 0; do { xge_hal_fifo_dtr_buffer_set(channelh, dtr, count, (dma_addr_t)htole64(segs[count].ds_addr), segs[count].ds_len); count++; } while(count < nsegs); /* Pre-write Sync of mapping */ bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map, BUS_DMASYNC_PREWRITE); if((lldev->enabled_tso) && (m_head->m_pkthdr.csum_flags & CSUM_TSO)) { XGE_DRV_STATS(tx_tso); xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz); } /* Checksum */ if(ifnetp->if_hwassist > 0) { xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN); } /* Post descriptor to FIFO channel */ xge_hal_fifo_dtr_post(channelh, dtr); XGE_DRV_STATS(tx_posted); /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter) * listener so that we can use tools like tcpdump */ ETHER_BPF_MTAP(ifnetp, m_head); } /* Prepend the packet back to queue */ IF_PREPEND(&ifnetp->if_snd, m_head); ifnetp->if_drv_flags |= IFF_DRV_OACTIVE; xge_queue_produce_context(xge_hal_device_queue(lldev->devh), XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh); XGE_DRV_STATS(tx_again); } /** * xge_get_buf * Allocates new mbufs to be placed into descriptors * * @dtrh Descriptor Handle * @rxd_priv Rx Descriptor Private Data * @lldev Per-adapter Data * @index Buffer Index (if multi-buffer mode) * * Returns XGE_HAL_OK or HAL error enums */ int xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv, xge_lldev_t *lldev, int index) { register mbuf_t mp = NULL; struct ifnet *ifnetp = lldev->ifnetp; int status = XGE_HAL_OK; int buffer_size = 0, cluster_size = 0, count; bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map; bus_dma_segment_t segs[3]; buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ? 
ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE : lldev->rxd_mbuf_len[index]; if(buffer_size <= MCLBYTES) { cluster_size = MCLBYTES; mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); } else { cluster_size = MJUMPAGESIZE; if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) && (buffer_size > MJUMPAGESIZE)) { cluster_size = MJUM9BYTES; } mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size); } if(!mp) { xge_trace(XGE_ERR, "Out of memory to allocate mbuf"); status = XGE_HAL_FAIL; goto getbuf_out; } /* Update mbuf's length, packet length and receive interface */ mp->m_len = mp->m_pkthdr.len = buffer_size; mp->m_pkthdr.rcvif = ifnetp; /* Load DMA map */ if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map, mp, segs, &count, BUS_DMA_NOWAIT)) { XGE_DRV_STATS(rx_map_fail); m_freem(mp); XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL); } /* Update descriptor private data */ rxd_priv->bufferArray[index] = mp; rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr); rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map; lldev->extra_dma_map = map; /* Pre-Read/Write sync */ bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD); /* Unload DMA map of mbuf in current descriptor */ bus_dmamap_unload(lldev->dma_tag_rx, map); /* Set descriptor buffer */ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr, cluster_size); } getbuf_out: return status; } /** * xge_get_buf_3b_5b * Allocates new mbufs to be placed into descriptors (in multi-buffer modes) * * @dtrh Descriptor Handle * @rxd_priv Rx Descriptor Private Data * @lldev Per-adapter Data * * Returns XGE_HAL_OK or HAL error enums */ int xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv, xge_lldev_t *lldev) { bus_addr_t dma_pointers[5]; int dma_sizes[5]; int status = XGE_HAL_OK, index; int newindex = 0; for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { status = xge_get_buf(dtrh, rxd_priv, lldev, index); if(status != XGE_HAL_OK) { for(newindex = 0; newindex < index; newindex++) { m_freem(rxd_priv->bufferArray[newindex]); } XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status); } } for(index = 0; index < lldev->buffer_mode; index++) { if(lldev->rxd_mbuf_len[index] != 0) { dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr; dma_sizes[index] = lldev->rxd_mbuf_len[index]; } else { dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr; dma_sizes[index] = 1; } } /* Assigning second buffer to third pointer in 2 buffer mode */ if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) { dma_pointers[2] = dma_pointers[1]; dma_sizes[2] = dma_sizes[1]; dma_sizes[1] = 1; } if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) { xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes); } else { xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes); } _exit: return status; } /** * xge_tx_compl * If the interrupt is due to Tx completion, free the sent buffer * * @channelh Channel Handle * @dtr Descriptor * @t_code Transfer Code indicating success or error * @userdata Per-adapter Data * * Returns XGE_HAL_OK or HAL error enum */ xge_hal_status_e xge_tx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code, void *userdata) { xge_tx_priv_t *ll_tx_priv = NULL; xge_lldev_t *lldev = (xge_lldev_t *)userdata; struct ifnet *ifnetp = lldev->ifnetp; mbuf_t m_buffer = NULL; int qindex = xge_hal_channel_id(channelh); mtx_lock(&lldev->mtx_tx[qindex]); XGE_DRV_STATS(tx_completions); /* * For each completed descriptor: Get 
private structure, free buffer, * do unmapping, and free descriptor */ do { XGE_DRV_STATS(tx_desc_compl); if(t_code) { XGE_DRV_STATS(tx_tcode); xge_trace(XGE_TRACE, "t_code %d", t_code); xge_hal_device_handle_tcode(channelh, dtr, t_code); } ll_tx_priv = xge_hal_fifo_dtr_private(dtr); m_buffer = ll_tx_priv->buffer; bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map); m_freem(m_buffer); ll_tx_priv->buffer = NULL; xge_hal_fifo_dtr_free(channelh, dtr); } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) == XGE_HAL_OK); xge_send_locked(ifnetp, qindex); ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE; mtx_unlock(&lldev->mtx_tx[qindex]); return XGE_HAL_OK; } /** * xge_tx_initial_replenish * Initially allocate buffers and set them into descriptors for later use * * @channelh Tx Channel Handle * @dtrh Descriptor Handle * @index * @userdata Per-adapter Data * @reopen Channel open/reopen option * * Returns XGE_HAL_OK or HAL error enums */ xge_hal_status_e xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, int index, void *userdata, xge_hal_channel_reopen_e reopen) { xge_tx_priv_t *txd_priv = NULL; int status = XGE_HAL_OK; /* Get the user data portion from channel handle */ xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(lldev == NULL) { XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out, XGE_HAL_FAIL); } /* Get the private data */ txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh); if(txd_priv == NULL) { XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out, XGE_HAL_FAIL); } /* Create DMA map for this descriptor */ if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT, &txd_priv->dma_map)) { XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed", txinit_out, XGE_HAL_FAIL); } txinit_out: return status; } /** * xge_rx_initial_replenish * Initially allocate buffers and set them into descriptors for later use * * @channelh Tx Channel Handle * @dtrh Descriptor Handle * @index Ring Index * @userdata Per-adapter Data * @reopen Channel open/reopen option * * Returns XGE_HAL_OK or HAL error enums */ xge_hal_status_e xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, int index, void *userdata, xge_hal_channel_reopen_e reopen) { xge_rx_priv_t *rxd_priv = NULL; int status = XGE_HAL_OK; int index1 = 0, index2 = 0; /* Get the user data portion from channel handle */ xge_lldev_t *lldev = xge_hal_channel_userdata(channelh); if(lldev == NULL) { XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out, XGE_HAL_FAIL); } /* Get the private data */ rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); if(rxd_priv == NULL) { XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out, XGE_HAL_FAIL); } rxd_priv->bufferArray = xge_os_malloc(NULL, (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); if(rxd_priv->bufferArray == NULL) { XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out, XGE_HAL_FAIL); } if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) { /* Create DMA map for these descriptors*/ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT, &rxd_priv->dmainfo[0].dma_map)) { XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed", rxinit_err_out, XGE_HAL_FAIL); } /* Get a buffer, attach it to this descriptor */ status = xge_get_buf(dtrh, rxd_priv, lldev, 0); } else { for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) { /* Create DMA map for this descriptor */ if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT , 
&rxd_priv->dmainfo[index1].dma_map)) { for(index2 = index1 - 1; index2 >= 0; index2--) { bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[index2].dma_map); } XGE_EXIT_ON_ERR( "Jumbo DMA map creation for Rx descriptor failed", rxinit_err_out, XGE_HAL_FAIL); } } status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev); } if(status != XGE_HAL_OK) { for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) { bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[index1].dma_map); } goto rxinit_err_out; } else { goto rxinit_out; } rxinit_err_out: xge_os_free(NULL, rxd_priv->bufferArray, (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); rxinit_out: return status; } /** * xge_rx_term * During unload terminate and free all descriptors * * @channelh Rx Channel Handle * @dtrh Rx Descriptor Handle * @state Descriptor State * @userdata Per-adapter Data * @reopen Channel open/reopen option */ void xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen) { xge_rx_priv_t *rxd_priv = NULL; xge_lldev_t *lldev = NULL; int index = 0; /* Descriptor state is not "Posted" */ if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out; /* Get the user data portion */ lldev = xge_hal_channel_userdata(channelh); /* Get the private data */ rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh); for(index = 0; index < lldev->rxd_mbuf_cnt; index++) { if(rxd_priv->dmainfo[index].dma_map != NULL) { bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map); if(rxd_priv->bufferArray[index] != NULL) m_free(rxd_priv->bufferArray[index]); bus_dmamap_destroy(lldev->dma_tag_rx, rxd_priv->dmainfo[index].dma_map); } } xge_os_free(NULL, rxd_priv->bufferArray, (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt)); /* Free the descriptor */ xge_hal_ring_dtr_free(channelh, dtrh); rxterm_out: return; } /** * xge_tx_term * During unload terminate and free all descriptors * * @channelh Rx Channel Handle * @dtrh Rx Descriptor Handle * @state Descriptor State * @userdata Per-adapter Data * @reopen Channel open/reopen option */ void xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen) { xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr); xge_lldev_t *lldev = (xge_lldev_t *)userdata; /* Destroy DMA map */ bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map); } /** * xge_methods * * FreeBSD device interface entry points */ static device_method_t xge_methods[] = { DEVMETHOD(device_probe, xge_probe), DEVMETHOD(device_attach, xge_attach), DEVMETHOD(device_detach, xge_detach), DEVMETHOD(device_shutdown, xge_shutdown), DEVMETHOD_END }; static driver_t xge_driver = { "nxge", xge_methods, sizeof(xge_lldev_t), }; static devclass_t xge_devclass; DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0); Index: head/sys/dev/vxge/vxge.c =================================================================== --- head/sys/dev/vxge/vxge.c (revision 331828) +++ head/sys/dev/vxge/vxge.c (revision 331829) @@ -1,4203 +1,4197 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright(c) 2002-2011 Exar Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification are permitted provided the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Exar Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include static int vxge_pci_bd_no = -1; static u32 vxge_drv_copyright = 0; static u32 vxge_dev_ref_count = 0; static u32 vxge_dev_req_reboot = 0; static int vpath_selector[VXGE_HAL_MAX_VIRTUAL_PATHS] = \ {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31}; /* * vxge_probe * Probes for x3100 devices */ int vxge_probe(device_t ndev) { int err = ENXIO; u16 pci_bd_no = 0; u16 pci_vendor_id = 0; u16 pci_device_id = 0; char adapter_name[64]; pci_vendor_id = pci_get_vendor(ndev); if (pci_vendor_id != VXGE_PCI_VENDOR_ID) goto _exit0; pci_device_id = pci_get_device(ndev); if (pci_device_id == VXGE_PCI_DEVICE_ID_TITAN_1) { pci_bd_no = (pci_get_bus(ndev) | pci_get_slot(ndev)); snprintf(adapter_name, sizeof(adapter_name), VXGE_ADAPTER_NAME, pci_get_revid(ndev)); device_set_desc_copy(ndev, adapter_name); if (!vxge_drv_copyright) { device_printf(ndev, VXGE_COPYRIGHT); vxge_drv_copyright = 1; } if (vxge_dev_req_reboot == 0) { vxge_pci_bd_no = pci_bd_no; err = BUS_PROBE_DEFAULT; } else { if (pci_bd_no != vxge_pci_bd_no) { vxge_pci_bd_no = pci_bd_no; err = BUS_PROBE_DEFAULT; } } } _exit0: return (err); } /* * vxge_attach * Connects driver to the system if probe was success @ndev handle */ int vxge_attach(device_t ndev) { int err = 0; vxge_dev_t *vdev; vxge_hal_device_t *hldev = NULL; vxge_hal_device_attr_t device_attr; vxge_free_resources_e error_level = VXGE_FREE_NONE; vxge_hal_status_e status = VXGE_HAL_OK; /* Get per-ndev buffer */ vdev = (vxge_dev_t *) device_get_softc(ndev); if (!vdev) goto _exit0; bzero(vdev, sizeof(vxge_dev_t)); vdev->ndev = ndev; strlcpy(vdev->ndev_name, "vxge", sizeof(vdev->ndev_name)); err = vxge_driver_config(vdev); if (err != 0) goto _exit0; /* Initialize HAL driver */ status = vxge_driver_init(vdev); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "Failed to initialize driver\n"); goto _exit0; } /* Enable PCI bus-master */ pci_enable_busmaster(ndev); /* Allocate resources */ err = vxge_alloc_resources(vdev); if (err != 0) { device_printf(vdev->ndev, "resource allocation failed\n"); goto _exit0; } err = vxge_device_hw_info_get(vdev); if (err != 0) { error_level = VXGE_FREE_BAR2; goto _exit0; } /* Get firmware default values for Device Configuration */ 
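/*
 * The firmware defaults read below are then customized by
 * vxge_vpath_config(), which applies the hint.vxge.* tunables
 * gathered earlier in vxge_driver_config().
 */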
vxge_hal_device_config_default_get(vdev->device_config); /* Customize Device Configuration based on User request */ vxge_vpath_config(vdev); /* Allocate ISR resources */ err = vxge_alloc_isr_resources(vdev); if (err != 0) { error_level = VXGE_FREE_ISR_RESOURCE; device_printf(vdev->ndev, "isr resource allocation failed\n"); goto _exit0; } /* HAL attributes */ device_attr.bar0 = (u8 *) vdev->pdev->bar_info[0]; device_attr.bar1 = (u8 *) vdev->pdev->bar_info[1]; device_attr.bar2 = (u8 *) vdev->pdev->bar_info[2]; device_attr.regh0 = (vxge_bus_res_t *) vdev->pdev->reg_map[0]; device_attr.regh1 = (vxge_bus_res_t *) vdev->pdev->reg_map[1]; device_attr.regh2 = (vxge_bus_res_t *) vdev->pdev->reg_map[2]; device_attr.irqh = (pci_irq_h) vdev->config.isr_info[0].irq_handle; device_attr.cfgh = vdev->pdev; device_attr.pdev = vdev->pdev; /* Initialize HAL Device */ status = vxge_hal_device_initialize((vxge_hal_device_h *) &hldev, &device_attr, vdev->device_config); if (status != VXGE_HAL_OK) { error_level = VXGE_FREE_ISR_RESOURCE; device_printf(vdev->ndev, "hal device initialization failed\n"); goto _exit0; } vdev->devh = hldev; vxge_hal_device_private_set(hldev, vdev); if (vdev->is_privilaged) { err = vxge_firmware_verify(vdev); if (err != 0) { vxge_dev_req_reboot = 1; error_level = VXGE_FREE_TERMINATE_DEVICE; goto _exit0; } } /* Allocate memory for vpath */ vdev->vpaths = (vxge_vpath_t *) vxge_mem_alloc(vdev->no_of_vpath * sizeof(vxge_vpath_t)); if (vdev->vpaths == NULL) { error_level = VXGE_FREE_TERMINATE_DEVICE; device_printf(vdev->ndev, "vpath memory allocation failed\n"); goto _exit0; } vdev->no_of_func = 1; if (vdev->is_privilaged) { vxge_hal_func_mode_count(vdev->devh, vdev->config.hw_info.function_mode, &vdev->no_of_func); vxge_bw_priority_config(vdev); } /* Initialize mutexes */ vxge_mutex_init(vdev); /* Initialize Media */ vxge_media_init(vdev); err = vxge_ifp_setup(ndev); if (err != 0) { error_level = VXGE_FREE_MEDIA; device_printf(vdev->ndev, "setting up interface failed\n"); goto _exit0; } err = vxge_isr_setup(vdev); if (err != 0) { error_level = VXGE_FREE_INTERFACE; device_printf(vdev->ndev, "failed to associate interrupt handler with device\n"); goto _exit0; } vxge_device_hw_info_print(vdev); vdev->is_active = TRUE; _exit0: if (error_level) { vxge_free_resources(ndev, error_level); err = ENXIO; } return (err); } /* * vxge_detach * Detaches driver from the Kernel subsystem */ int vxge_detach(device_t ndev) { vxge_dev_t *vdev; vdev = (vxge_dev_t *) device_get_softc(ndev); if (vdev->is_active) { vdev->is_active = FALSE; vxge_stop(vdev); vxge_free_resources(ndev, VXGE_FREE_ALL); } return (0); } /* * vxge_shutdown * To shutdown device before system shutdown */ int vxge_shutdown(device_t ndev) { vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev); vxge_stop(vdev); return (0); } /* * vxge_init * Initialize the interface */ void vxge_init(void *vdev_ptr) { vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr; VXGE_DRV_LOCK(vdev); vxge_init_locked(vdev); VXGE_DRV_UNLOCK(vdev); } /* * vxge_init_locked * Initialize the interface */ void vxge_init_locked(vxge_dev_t *vdev) { int i, err = EINVAL; vxge_hal_device_t *hldev = vdev->devh; vxge_hal_status_e status = VXGE_HAL_OK; vxge_hal_vpath_h vpath_handle; ifnet_t ifp = vdev->ifp; /* If device is in running state, initializing is not required */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) goto _exit0; VXGE_DRV_LOCK_ASSERT(vdev); /* Opening vpaths */ err = vxge_vpath_open(vdev); if (err != 0) goto _exit1; if (vdev->config.rth_enable) { status = 
vxge_rth_config(vdev); if (status != VXGE_HAL_OK) goto _exit1; } for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; /* check initial mtu before enabling the device */ status = vxge_hal_device_mtu_check(vpath_handle, ifp->if_mtu); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "invalid mtu size %u specified\n", ifp->if_mtu); goto _exit1; } status = vxge_hal_vpath_mtu_set(vpath_handle, ifp->if_mtu); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "setting mtu in device failed\n"); goto _exit1; } } /* Enable HAL device */ status = vxge_hal_device_enable(hldev); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "failed to enable device\n"); goto _exit1; } if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) vxge_msix_enable(vdev); /* Checksum capability */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; /* Enabling mcast for all vpath */ vxge_hal_vpath_mcast_enable(vpath_handle); /* Enabling bcast for all vpath */ status = vxge_hal_vpath_bcast_enable(vpath_handle); if (status != VXGE_HAL_OK) device_printf(vdev->ndev, "can't enable bcast on vpath (%d)\n", i); } /* Enable interrupts */ vxge_hal_device_intr_enable(vdev->devh); for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; bzero(&(vdev->vpaths[i].driver_stats), sizeof(vxge_drv_stats_t)); status = vxge_hal_vpath_enable(vpath_handle); if (status != VXGE_HAL_OK) goto _exit2; } vxge_os_mdelay(1000); /* Device is initialized */ vdev->is_initialized = TRUE; /* Now inform the stack we're ready */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; goto _exit0; _exit2: vxge_hal_device_intr_disable(vdev->devh); vxge_hal_device_disable(hldev); _exit1: vxge_vpath_close(vdev); _exit0: return; } /* * vxge_driver_init * Initializes HAL driver */ vxge_hal_status_e vxge_driver_init(vxge_dev_t *vdev) { vxge_hal_uld_cbs_t uld_callbacks; vxge_hal_driver_config_t driver_config; vxge_hal_status_e status = VXGE_HAL_OK; /* Initialize HAL driver */ if (!vxge_dev_ref_count) { bzero(&uld_callbacks, sizeof(vxge_hal_uld_cbs_t)); bzero(&driver_config, sizeof(vxge_hal_driver_config_t)); uld_callbacks.link_up = vxge_link_up; uld_callbacks.link_down = vxge_link_down; uld_callbacks.crit_err = vxge_crit_error; uld_callbacks.sched_timer = NULL; uld_callbacks.xpak_alarm_log = NULL; status = vxge_hal_driver_initialize(&driver_config, &uld_callbacks); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "failed to initialize driver\n"); goto _exit0; } } vxge_hal_driver_debug_set(VXGE_TRACE); vxge_dev_ref_count++; _exit0: return (status); } /* * vxge_driver_config */ int vxge_driver_config(vxge_dev_t *vdev) { int i, err = 0; char temp_buffer[30]; vxge_bw_info_t bw_info; VXGE_GET_PARAM("hint.vxge.0.no_of_vpath", vdev->config, no_of_vpath, VXGE_DEFAULT_USER_HARDCODED); if (vdev->config.no_of_vpath == VXGE_DEFAULT_USER_HARDCODED) vdev->config.no_of_vpath = mp_ncpus; if (vdev->config.no_of_vpath <= 0) { err = EINVAL; device_printf(vdev->ndev, "Failed to load driver, \ invalid config : \'no_of_vpath\'\n"); goto _exit0; } VXGE_GET_PARAM("hint.vxge.0.intr_coalesce", vdev->config, intr_coalesce, VXGE_DEFAULT_CONFIG_DISABLE); VXGE_GET_PARAM("hint.vxge.0.rth_enable", 
vdev->config, rth_enable, VXGE_DEFAULT_CONFIG_ENABLE); VXGE_GET_PARAM("hint.vxge.0.rth_bkt_sz", vdev->config, rth_bkt_sz, VXGE_DEFAULT_RTH_BUCKET_SIZE); VXGE_GET_PARAM("hint.vxge.0.lro_enable", vdev->config, lro_enable, VXGE_DEFAULT_CONFIG_ENABLE); VXGE_GET_PARAM("hint.vxge.0.tso_enable", vdev->config, tso_enable, VXGE_DEFAULT_CONFIG_ENABLE); VXGE_GET_PARAM("hint.vxge.0.tx_steering", vdev->config, tx_steering, VXGE_DEFAULT_CONFIG_DISABLE); VXGE_GET_PARAM("hint.vxge.0.msix_enable", vdev->config, intr_mode, VXGE_HAL_INTR_MODE_MSIX); VXGE_GET_PARAM("hint.vxge.0.ifqmaxlen", vdev->config, ifq_maxlen, VXGE_DEFAULT_CONFIG_IFQ_MAXLEN); VXGE_GET_PARAM("hint.vxge.0.port_mode", vdev->config, port_mode, VXGE_DEFAULT_CONFIG_VALUE); if (vdev->config.port_mode == VXGE_DEFAULT_USER_HARDCODED) vdev->config.port_mode = VXGE_DEFAULT_CONFIG_VALUE; VXGE_GET_PARAM("hint.vxge.0.l2_switch", vdev->config, l2_switch, VXGE_DEFAULT_CONFIG_VALUE); if (vdev->config.l2_switch == VXGE_DEFAULT_USER_HARDCODED) vdev->config.l2_switch = VXGE_DEFAULT_CONFIG_VALUE; VXGE_GET_PARAM("hint.vxge.0.fw_upgrade", vdev->config, fw_option, VXGE_FW_UPGRADE_ALL); VXGE_GET_PARAM("hint.vxge.0.low_latency", vdev->config, low_latency, VXGE_DEFAULT_CONFIG_DISABLE); VXGE_GET_PARAM("hint.vxge.0.func_mode", vdev->config, function_mode, VXGE_DEFAULT_CONFIG_VALUE); if (vdev->config.function_mode == VXGE_DEFAULT_USER_HARDCODED) vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE; if (!(is_multi_func(vdev->config.function_mode) || is_single_func(vdev->config.function_mode))) vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE; for (i = 0; i < VXGE_HAL_MAX_FUNCTIONS; i++) { bw_info.func_id = i; sprintf(temp_buffer, "hint.vxge.0.bandwidth_%d", i); VXGE_GET_PARAM(temp_buffer, bw_info, bandwidth, VXGE_DEFAULT_USER_HARDCODED); if (bw_info.bandwidth == VXGE_DEFAULT_USER_HARDCODED) bw_info.bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT; sprintf(temp_buffer, "hint.vxge.0.priority_%d", i); VXGE_GET_PARAM(temp_buffer, bw_info, priority, VXGE_DEFAULT_USER_HARDCODED); if (bw_info.priority == VXGE_DEFAULT_USER_HARDCODED) bw_info.priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT; vxge_os_memcpy(&vdev->config.bw_info[i], &bw_info, sizeof(vxge_bw_info_t)); } _exit0: return (err); } /* * vxge_stop */ void vxge_stop(vxge_dev_t *vdev) { VXGE_DRV_LOCK(vdev); vxge_stop_locked(vdev); VXGE_DRV_UNLOCK(vdev); } /* * vxge_stop_locked * Common code for both stop and part of reset. 
* disables device, interrupts and closes vpaths handle */ void vxge_stop_locked(vxge_dev_t *vdev) { u64 adapter_status = 0; vxge_hal_status_e status; vxge_hal_device_t *hldev = vdev->devh; ifnet_t ifp = vdev->ifp; VXGE_DRV_LOCK_ASSERT(vdev); /* If device is not in "Running" state, return */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; /* Set appropriate flags */ vdev->is_initialized = FALSE; hldev->link_state = VXGE_HAL_LINK_NONE; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if_link_state_change(ifp, LINK_STATE_DOWN); /* Disable interrupts */ vxge_hal_device_intr_disable(hldev); /* Disable HAL device */ status = vxge_hal_device_disable(hldev); if (status != VXGE_HAL_OK) { vxge_hal_device_status(hldev, &adapter_status); device_printf(vdev->ndev, "adapter status: 0x%llx\n", adapter_status); } /* reset vpaths */ vxge_vpath_reset(vdev); vxge_os_mdelay(1000); /* Close Vpaths */ vxge_vpath_close(vdev); } void vxge_send(ifnet_t ifp) { vxge_vpath_t *vpath; vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; vpath = &(vdev->vpaths[0]); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (VXGE_TX_TRYLOCK(vpath)) { vxge_send_locked(ifp, vpath); VXGE_TX_UNLOCK(vpath); } } } static inline void vxge_send_locked(ifnet_t ifp, vxge_vpath_t *vpath) { mbuf_t m_head = NULL; vxge_dev_t *vdev = vpath->vdev; VXGE_TX_LOCK_ASSERT(vpath); if ((!vdev->is_initialized) || ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)) return; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (vxge_xmit(ifp, vpath, &m_head)) { if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); VXGE_DRV_STATS(vpath, tx_again); break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); } } #if __FreeBSD_version >= 800000 int vxge_mq_send(ifnet_t ifp, mbuf_t m_head) { int i = 0, err = 0; vxge_vpath_t *vpath; vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; if (vdev->config.tx_steering) { i = vxge_vpath_get(vdev, m_head); } else if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) { i = m_head->m_pkthdr.flowid % vdev->no_of_vpath; } vpath = &(vdev->vpaths[i]); if (VXGE_TX_TRYLOCK(vpath)) { err = vxge_mq_send_locked(ifp, vpath, m_head); VXGE_TX_UNLOCK(vpath); } else err = drbr_enqueue(ifp, vpath->br, m_head); return (err); } static inline int vxge_mq_send_locked(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t m_head) { int err = 0; mbuf_t next = NULL; vxge_dev_t *vdev = vpath->vdev; VXGE_TX_LOCK_ASSERT(vpath); if ((!vdev->is_initialized) || ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)) { err = drbr_enqueue(ifp, vpath->br, m_head); goto _exit0; } if (m_head == NULL) { next = drbr_dequeue(ifp, vpath->br); } else if (drbr_needs_enqueue(ifp, vpath->br)) { if ((err = drbr_enqueue(ifp, vpath->br, m_head)) != 0) goto _exit0; next = drbr_dequeue(ifp, vpath->br); } else next = m_head; /* Process the queue */ while (next != NULL) { if ((err = vxge_xmit(ifp, vpath, &next)) != 0) { if (next == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; err = drbr_enqueue(ifp, vpath->br, next); VXGE_DRV_STATS(vpath, tx_again); break; } if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); if (next->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; next = drbr_dequeue(ifp, vpath->br); } _exit0: return (err); } void 
vxge_mq_qflush(ifnet_t ifp) { int i; mbuf_t m_head; vxge_vpath_t *vpath; vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); if (!vpath->handle) continue; VXGE_TX_LOCK(vpath); while ((m_head = buf_ring_dequeue_sc(vpath->br)) != NULL) vxge_free_packet(m_head); VXGE_TX_UNLOCK(vpath); } if_qflush(ifp); } #endif static inline int vxge_xmit(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t *m_headp) { int err, num_segs = 0; u32 txdl_avail, dma_index, tagged = 0; dma_addr_t dma_addr; bus_size_t dma_sizes; void *dtr_priv; vxge_txdl_priv_t *txdl_priv; vxge_hal_txdl_h txdlh; vxge_hal_status_e status; vxge_dev_t *vdev = vpath->vdev; VXGE_DRV_STATS(vpath, tx_xmit); txdl_avail = vxge_hal_fifo_free_txdl_count_get(vpath->handle); if (txdl_avail < VXGE_TX_LOW_THRESHOLD) { VXGE_DRV_STATS(vpath, tx_low_dtr_cnt); err = ENOBUFS; goto _exit0; } /* Reserve descriptors */ status = vxge_hal_fifo_txdl_reserve(vpath->handle, &txdlh, &dtr_priv); if (status != VXGE_HAL_OK) { VXGE_DRV_STATS(vpath, tx_reserve_failed); err = ENOBUFS; goto _exit0; } /* Update Tx private structure for this descriptor */ txdl_priv = (vxge_txdl_priv_t *) dtr_priv; /* * Map the packet for DMA. * Returns number of segments through num_segs. */ err = vxge_dma_mbuf_coalesce(vpath->dma_tag_tx, txdl_priv->dma_map, m_headp, txdl_priv->dma_buffers, &num_segs); if (vpath->driver_stats.tx_max_frags < num_segs) vpath->driver_stats.tx_max_frags = num_segs; if (err == ENOMEM) { VXGE_DRV_STATS(vpath, tx_no_dma_setup); vxge_hal_fifo_txdl_free(vpath->handle, txdlh); goto _exit0; } else if (err != 0) { vxge_free_packet(*m_headp); VXGE_DRV_STATS(vpath, tx_no_dma_setup); vxge_hal_fifo_txdl_free(vpath->handle, txdlh); goto _exit0; } txdl_priv->mbuf_pkt = *m_headp; /* Set VLAN tag in descriptor only if this packet has it */ if ((*m_headp)->m_flags & M_VLANTAG) vxge_hal_fifo_txdl_vlan_set(txdlh, (*m_headp)->m_pkthdr.ether_vtag); /* Set descriptor buffer for header and each fragment/segment */ for (dma_index = 0; dma_index < num_segs; dma_index++) { dma_sizes = txdl_priv->dma_buffers[dma_index].ds_len; dma_addr = htole64(txdl_priv->dma_buffers[dma_index].ds_addr); vxge_hal_fifo_txdl_buffer_set(vpath->handle, txdlh, dma_index, dma_addr, dma_sizes); } /* Pre-write Sync of mapping */ bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map, BUS_DMASYNC_PREWRITE); if ((*m_headp)->m_pkthdr.csum_flags & CSUM_TSO) { if ((*m_headp)->m_pkthdr.tso_segsz) { VXGE_DRV_STATS(vpath, tx_tso); vxge_hal_fifo_txdl_lso_set(txdlh, VXGE_HAL_FIFO_LSO_FRM_ENCAP_AUTO, (*m_headp)->m_pkthdr.tso_segsz); } } /* Checksum */ if (ifp->if_hwassist > 0) { vxge_hal_fifo_txdl_cksum_set_bits(txdlh, VXGE_HAL_FIFO_TXD_TX_CKO_IPV4_EN | VXGE_HAL_FIFO_TXD_TX_CKO_TCP_EN | VXGE_HAL_FIFO_TXD_TX_CKO_UDP_EN); } if ((vxge_hal_device_check_id(vdev->devh) == VXGE_HAL_CARD_TITAN_1A) && (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0))) tagged = 1; vxge_hal_fifo_txdl_post(vpath->handle, txdlh, tagged); VXGE_DRV_STATS(vpath, tx_posted); _exit0: return (err); } /* * vxge_tx_replenish * Allocate buffers and set them into descriptors for later use */ /* ARGSUSED */ vxge_hal_status_e vxge_tx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen) { int err = 0; vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv; err = bus_dmamap_create(vpath->dma_tag_tx, BUS_DMA_NOWAIT, &txdl_priv->dma_map); return ((err == 0) ? 
VXGE_HAL_OK : VXGE_HAL_FAIL); } /* * vxge_tx_compl * If the interrupt is due to Tx completion, free the sent buffer */ vxge_hal_status_e vxge_tx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, void *dtr_priv, vxge_hal_fifo_tcode_e t_code, void *userdata) { vxge_hal_status_e status = VXGE_HAL_OK; vxge_txdl_priv_t *txdl_priv; vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_dev_t *vdev = vpath->vdev; ifnet_t ifp = vdev->ifp; VXGE_TX_LOCK(vpath); /* * For each completed descriptor * Get private structure, free buffer, do unmapping, and free descriptor */ do { VXGE_DRV_STATS(vpath, tx_compl); if (t_code != VXGE_HAL_FIFO_T_CODE_OK) { device_printf(vdev->ndev, "tx transfer code %d\n", t_code); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); VXGE_DRV_STATS(vpath, tx_tcode); vxge_hal_fifo_handle_tcode(vpath_handle, txdlh, t_code); } if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); txdl_priv = (vxge_txdl_priv_t *) dtr_priv; bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map); vxge_free_packet(txdl_priv->mbuf_pkt); vxge_hal_fifo_txdl_free(vpath->handle, txdlh); } while (vxge_hal_fifo_txdl_next_completed(vpath_handle, &txdlh, &dtr_priv, &t_code) == VXGE_HAL_OK); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; VXGE_TX_UNLOCK(vpath); return (status); } /* ARGSUSED */ void vxge_tx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, void *dtr_priv, vxge_hal_txdl_state_e state, void *userdata, vxge_hal_reopen_e reopen) { vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv; if (state != VXGE_HAL_TXDL_STATE_POSTED) return; if (txdl_priv != NULL) { bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map); bus_dmamap_destroy(vpath->dma_tag_tx, txdl_priv->dma_map); vxge_free_packet(txdl_priv->mbuf_pkt); } /* Free the descriptor */ vxge_hal_fifo_txdl_free(vpath->handle, txdlh); } /* * vxge_rx_replenish * Allocate buffers and set them into descriptors for later use */ /* ARGSUSED */ vxge_hal_status_e vxge_rx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh, void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen) { int err = 0; vxge_hal_status_e status = VXGE_HAL_OK; vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv; /* Create DMA map for these descriptors */ err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT, &rxd_priv->dma_map); if (err == 0) { if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) { bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map); status = VXGE_HAL_FAIL; } } return (status); } /* * vxge_rx_compl */ vxge_hal_status_e vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh, void *dtr_priv, u8 t_code, void *userdata) { mbuf_t mbuf_up; vxge_rxd_priv_t *rxd_priv; vxge_hal_ring_rxd_info_t ext_info; vxge_hal_status_e status = VXGE_HAL_OK; vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_dev_t *vdev = vpath->vdev; struct lro_ctrl *lro = &vpath->lro; /* get the interface pointer */ ifnet_t ifp = vdev->ifp; do { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { vxge_hal_ring_rxd_post(vpath_handle, rxdh); status = VXGE_HAL_FAIL; break; } VXGE_DRV_STATS(vpath, rx_compl); rxd_priv = (vxge_rxd_priv_t *) dtr_priv; /* Gets details of mbuf i.e., packet length */ vxge_rx_rxd_1b_get(vpath, rxdh, dtr_priv); /* * Prepare one buffer to send it to upper layer Since upper * layer frees the buffer do not use rxd_priv->mbuf_pkt. 
* Meanwhile prepare a new buffer, do mapping, use with the * current descriptor and post descriptor back to ring vpath */ mbuf_up = rxd_priv->mbuf_pkt; if (t_code != VXGE_HAL_RING_RXD_T_CODE_OK) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); VXGE_DRV_STATS(vpath, rx_tcode); status = vxge_hal_ring_handle_tcode(vpath_handle, rxdh, t_code); /* * If transfer code is not for unknown protocols and * vxge_hal_device_handle_tcode is NOT returned * VXGE_HAL_OK * drop this packet and increment rx_tcode stats */ if ((status != VXGE_HAL_OK) && (t_code != VXGE_HAL_RING_T_CODE_L3_PKT_ERR)) { vxge_free_packet(mbuf_up); vxge_hal_ring_rxd_post(vpath_handle, rxdh); continue; } } if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) { /* * If unable to allocate buffer, post descriptor back * to vpath for future processing of same packet. */ vxge_hal_ring_rxd_post(vpath_handle, rxdh); continue; } /* Get the extended information */ vxge_hal_ring_rxd_1b_info_get(vpath_handle, rxdh, &ext_info); /* post descriptor with newly allocated mbuf back to vpath */ vxge_hal_ring_rxd_post(vpath_handle, rxdh); vpath->rxd_posted++; if (vpath->rxd_posted % VXGE_RXD_REPLENISH_COUNT == 0) vxge_hal_ring_rxd_post_post_db(vpath_handle); /* * Set successfully computed checksums in the mbuf. * Leave the rest to the stack to be reverified. */ vxge_rx_checksum(ext_info, mbuf_up); #if __FreeBSD_version >= 800000 M_HASHTYPE_SET(mbuf_up, M_HASHTYPE_OPAQUE); mbuf_up->m_pkthdr.flowid = vpath->vp_index; #endif /* Post-Read sync for buffers */ bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map, BUS_DMASYNC_POSTREAD); vxge_rx_input(ifp, mbuf_up, vpath); } while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh, &dtr_priv, &t_code) == VXGE_HAL_OK); /* Flush any outstanding LRO work */ if (vpath->lro_enable && vpath->lro.lro_cnt) tcp_lro_flush_all(lro); return (status); } static inline void vxge_rx_input(ifnet_t ifp, mbuf_t mbuf_up, vxge_vpath_t *vpath) { if (vpath->lro_enable && vpath->lro.lro_cnt) { if (tcp_lro_rx(&vpath->lro, mbuf_up, 0) == 0) return; } (*ifp->if_input) (ifp, mbuf_up); } static inline void vxge_rx_checksum(vxge_hal_ring_rxd_info_t ext_info, mbuf_t mbuf_up) { if (!(ext_info.proto & VXGE_HAL_FRAME_PROTO_IP_FRAG) && (ext_info.proto & VXGE_HAL_FRAME_PROTO_TCP_OR_UDP) && ext_info.l3_cksum_valid && ext_info.l4_cksum_valid) { mbuf_up->m_pkthdr.csum_data = htons(0xffff); mbuf_up->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mbuf_up->m_pkthdr.csum_flags |= CSUM_IP_VALID; mbuf_up->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } else { if (ext_info.vlan) { mbuf_up->m_pkthdr.ether_vtag = ext_info.vlan; mbuf_up->m_flags |= M_VLANTAG; } } } /* * vxge_rx_term During unload terminate and free all descriptors * @vpath_handle Rx vpath Handle @rxdh Rx Descriptor Handle @state Descriptor * State @userdata Per-adapter Data @reopen vpath open/reopen option */ /* ARGSUSED */ void vxge_rx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh, void *dtr_priv, vxge_hal_rxd_state_e state, void *userdata, vxge_hal_reopen_e reopen) { vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv; if (state != VXGE_HAL_RXD_STATE_POSTED) return; if (rxd_priv != NULL) { bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map); bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map); vxge_free_packet(rxd_priv->mbuf_pkt); } /* Free the descriptor */ vxge_hal_ring_rxd_free(vpath_handle, rxdh); } /* * vxge_rx_rxd_1b_get * Get 
descriptors of packet to send up */ void vxge_rx_rxd_1b_get(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv) { vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv; mbuf_t mbuf_up = rxd_priv->mbuf_pkt; /* Retrieve data from completed descriptor */ vxge_hal_ring_rxd_1b_get(vpath->handle, rxdh, &rxd_priv->dma_addr[0], (u32 *) &rxd_priv->dma_sizes[0]); /* Update newly created buffer to be sent up with packet length */ mbuf_up->m_len = rxd_priv->dma_sizes[0]; mbuf_up->m_pkthdr.len = rxd_priv->dma_sizes[0]; mbuf_up->m_next = NULL; } /* * vxge_rx_rxd_1b_set * Allocates new mbufs to be placed into descriptors */ int vxge_rx_rxd_1b_set(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv) { int num_segs, err = 0; mbuf_t mbuf_pkt; bus_dmamap_t dma_map; bus_dma_segment_t dma_buffers[1]; vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv; vxge_dev_t *vdev = vpath->vdev; mbuf_pkt = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, vdev->rx_mbuf_sz); if (!mbuf_pkt) { err = ENOBUFS; VXGE_DRV_STATS(vpath, rx_no_buf); device_printf(vdev->ndev, "out of memory to allocate mbuf\n"); goto _exit0; } /* Update mbuf's length, packet length and receive interface */ mbuf_pkt->m_len = vdev->rx_mbuf_sz; mbuf_pkt->m_pkthdr.len = vdev->rx_mbuf_sz; mbuf_pkt->m_pkthdr.rcvif = vdev->ifp; /* Load DMA map */ err = vxge_dma_mbuf_coalesce(vpath->dma_tag_rx, vpath->extra_dma_map, &mbuf_pkt, dma_buffers, &num_segs); if (err != 0) { VXGE_DRV_STATS(vpath, rx_map_fail); vxge_free_packet(mbuf_pkt); goto _exit0; } /* Unload DMA map of mbuf in current descriptor */ bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map); /* Update descriptor private data */ dma_map = rxd_priv->dma_map; rxd_priv->mbuf_pkt = mbuf_pkt; rxd_priv->dma_addr[0] = htole64(dma_buffers->ds_addr); rxd_priv->dma_map = vpath->extra_dma_map; vpath->extra_dma_map = dma_map; /* Pre-Read/Write sync */ bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Set descriptor buffer */ vxge_hal_ring_rxd_1b_set(rxdh, rxd_priv->dma_addr[0], vdev->rx_mbuf_sz); _exit0: return (err); } /* * vxge_link_up * Callback for Link-up indication from HAL */ /* ARGSUSED */ void vxge_link_up(vxge_hal_device_h devh, void *userdata) { int i; vxge_vpath_t *vpath; vxge_hal_device_hw_info_t *hw_info; vxge_dev_t *vdev = (vxge_dev_t *) userdata; hw_info = &vdev->config.hw_info; ifnet_t ifp = vdev->ifp; if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) { for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); vxge_hal_vpath_tti_ci_set(vpath->handle); vxge_hal_vpath_rti_ci_set(vpath->handle); } } if (vdev->is_privilaged && (hw_info->ports > 1)) { vxge_active_port_update(vdev); device_printf(vdev->ndev, "Active Port : %lld\n", vdev->active_port); } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_UP); } /* * vxge_link_down * Callback for Link-down indication from HAL */ /* ARGSUSED */ void vxge_link_down(vxge_hal_device_h devh, void *userdata) { int i; vxge_vpath_t *vpath; vxge_dev_t *vdev = (vxge_dev_t *) userdata; ifnet_t ifp = vdev->ifp; if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) { for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); vxge_hal_vpath_tti_ci_reset(vpath->handle); vxge_hal_vpath_rti_ci_reset(vpath->handle); } } ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_DOWN); } /* * vxge_reset */ void vxge_reset(vxge_dev_t *vdev) { if (!vdev->is_initialized) 
return; VXGE_DRV_LOCK(vdev); vxge_stop_locked(vdev); vxge_init_locked(vdev); VXGE_DRV_UNLOCK(vdev); } /* * vxge_crit_error * Callback for Critical error indication from HAL */ /* ARGSUSED */ void vxge_crit_error(vxge_hal_device_h devh, void *userdata, vxge_hal_event_e type, u64 serr_data) { vxge_dev_t *vdev = (vxge_dev_t *) userdata; ifnet_t ifp = vdev->ifp; switch (type) { case VXGE_HAL_EVENT_SERR: case VXGE_HAL_EVENT_KDFCCTL: case VXGE_HAL_EVENT_CRITICAL: vxge_hal_device_intr_disable(vdev->devh); ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_link_state_change(ifp, LINK_STATE_DOWN); break; default: break; } } /* * vxge_ifp_setup */ int vxge_ifp_setup(device_t ndev) { ifnet_t ifp; int i, j, err = 0; vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev); for (i = 0, j = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) { if (!bVAL1(vdev->config.hw_info.vpath_mask, i)) continue; if (j >= vdev->no_of_vpath) break; vdev->vpaths[j].vp_id = i; vdev->vpaths[j].vp_index = j; vdev->vpaths[j].vdev = vdev; vdev->vpaths[j].is_configured = TRUE; vxge_os_memcpy((u8 *) vdev->vpaths[j].mac_addr, (u8 *) (vdev->config.hw_info.mac_addrs[i]), (size_t) ETHER_ADDR_LEN); j++; } /* Get interface ifnet structure for this Ether device */ ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(vdev->ndev, "memory allocation for ifnet failed\n"); err = ENXIO; goto _exit0; } vdev->ifp = ifp; /* Initialize interface ifnet structure */ if_initname(ifp, device_get_name(ndev), device_get_unit(ndev)); ifp->if_baudrate = VXGE_BAUDRATE; ifp->if_init = vxge_init; ifp->if_softc = vdev; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vxge_ioctl; ifp->if_start = vxge_send; #if __FreeBSD_version >= 800000 ifp->if_transmit = vxge_mq_send; ifp->if_qflush = vxge_mq_qflush; #endif ifp->if_snd.ifq_drv_maxlen = max(vdev->config.ifq_maxlen, ifqmaxlen); IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); /* IFQ_SET_READY(&ifp->if_snd); */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_JUMBO_MTU; if (vdev->config.tso_enable) vxge_tso_config(vdev); if (vdev->config.lro_enable) ifp->if_capabilities |= IFCAP_LRO; ifp->if_capenable = ifp->if_capabilities; strlcpy(vdev->ndev_name, device_get_nameunit(ndev), sizeof(vdev->ndev_name)); /* Attach the interface */ ether_ifattach(ifp, vdev->vpaths[0].mac_addr); _exit0: return (err); } /* * vxge_isr_setup * Register isr functions */ int vxge_isr_setup(vxge_dev_t *vdev) { int i, irq_rid, err = 0; vxge_vpath_t *vpath; void *isr_func_arg; void (*isr_func_ptr) (void *); switch (vdev->config.intr_mode) { case VXGE_HAL_INTR_MODE_IRQLINE: err = bus_setup_intr(vdev->ndev, vdev->config.isr_info[0].irq_res, (INTR_TYPE_NET | INTR_MPSAFE), vxge_isr_filter, vxge_isr_line, vdev, &vdev->config.isr_info[0].irq_handle); break; case VXGE_HAL_INTR_MODE_MSIX: for (i = 0; i < vdev->intr_count; i++) { irq_rid = vdev->config.isr_info[i].irq_rid; vpath = &vdev->vpaths[irq_rid / 4]; if ((irq_rid % 4) == 2) { isr_func_ptr = vxge_isr_msix; isr_func_arg = (void *) vpath; } else if ((irq_rid % 4) == 3) { isr_func_ptr = vxge_isr_msix_alarm; isr_func_arg = (void *) vpath; } else break; err = bus_setup_intr(vdev->ndev, vdev->config.isr_info[i].irq_res, (INTR_TYPE_NET | INTR_MPSAFE), NULL, (void *) isr_func_ptr, (void *) isr_func_arg, &vdev->config.isr_info[i].irq_handle); if (err != 0) break; } if (err != 0) { /* Teardown interrupt handler */ while (--i 
> 0) bus_teardown_intr(vdev->ndev, vdev->config.isr_info[i].irq_res, vdev->config.isr_info[i].irq_handle); } break; } return (err); } /* * vxge_isr_filter * ISR filter function - filter interrupts from other shared devices */ int vxge_isr_filter(void *handle) { u64 val64 = 0; vxge_dev_t *vdev = (vxge_dev_t *) handle; __hal_device_t *hldev = (__hal_device_t *) vdev->devh; vxge_hal_common_reg_t *common_reg = (vxge_hal_common_reg_t *) (hldev->common_reg); val64 = vxge_os_pio_mem_read64(vdev->pdev, (vdev->devh)->regh0, &common_reg->titan_general_int_status); return ((val64) ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); } /* * vxge_isr_line * Interrupt service routine for Line interrupts */ void vxge_isr_line(void *vdev_ptr) { vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr; vxge_hal_device_handle_irq(vdev->devh, 0); } void vxge_isr_msix(void *vpath_ptr) { u32 got_rx = 0; u32 got_tx = 0; __hal_virtualpath_t *hal_vpath; vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr; vxge_dev_t *vdev = vpath->vdev; hal_vpath = ((__hal_vpath_handle_t *) vpath->handle)->vpath; VXGE_DRV_STATS(vpath, isr_msix); VXGE_HAL_DEVICE_STATS_SW_INFO_TRAFFIC_INTR(vdev->devh); vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec); /* processing rx */ vxge_hal_vpath_poll_rx(vpath->handle, &got_rx); /* processing tx */ if (hal_vpath->vp_config->fifo.enable) { vxge_intr_coalesce_tx(vpath); vxge_hal_vpath_poll_tx(vpath->handle, &got_tx); } vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec); } void vxge_isr_msix_alarm(void *vpath_ptr) { int i; vxge_hal_status_e status = VXGE_HAL_OK; vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr; vxge_dev_t *vdev = vpath->vdev; VXGE_HAL_DEVICE_STATS_SW_INFO_NOT_TRAFFIC_INTR(vdev->devh); /* Process alarms in each vpath */ for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec_alarm); status = vxge_hal_vpath_alarm_process(vpath->handle, 0); if ((status == VXGE_HAL_ERR_EVENT_SLOT_FREEZE) || (status == VXGE_HAL_ERR_EVENT_SERR)) { device_printf(vdev->ndev, "processing alarms urecoverable error %x\n", status); /* Stop the driver */ vdev->is_initialized = FALSE; break; } vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec_alarm); } } /* * vxge_msix_enable */ vxge_hal_status_e vxge_msix_enable(vxge_dev_t *vdev) { int i, first_vp_id, msix_id; vxge_vpath_t *vpath; vxge_hal_status_e status = VXGE_HAL_OK; /* * Unmasking and Setting MSIX vectors before enabling interrupts * tim[] : 0 - Tx ## 1 - Rx ## 2 - UMQ-DMQ ## 0 - BITMAP */ int tim[4] = {0, 1, 0, 0}; for (i = 0; i < vdev->no_of_vpath; i++) { vpath = vdev->vpaths + i; first_vp_id = vdev->vpaths[0].vp_id; msix_id = vpath->vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE; tim[1] = vpath->msix_vec = msix_id + 1; vpath->msix_vec_alarm = first_vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE + VXGE_HAL_VPATH_MSIX_ALARM_ID; status = vxge_hal_vpath_mf_msix_set(vpath->handle, tim, VXGE_HAL_VPATH_MSIX_ALARM_ID); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "failed to set msix vectors to vpath\n"); break; } vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec); vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec_alarm); } return (status); } /* * vxge_media_init * Initializes, adds and sets media */ void vxge_media_init(vxge_dev_t *vdev) { ifmedia_init(&vdev->media, IFM_IMASK, vxge_media_change, vxge_media_status); /* Add supported media */ ifmedia_add(&vdev->media, IFM_ETHER | vdev->ifm_optics | IFM_FDX, 0, NULL); /* Set media */ ifmedia_add(&vdev->media, IFM_ETHER | 
IFM_AUTO, 0, NULL); ifmedia_set(&vdev->media, IFM_ETHER | IFM_AUTO); } /* * vxge_media_status * Callback for interface media settings */ void vxge_media_status(ifnet_t ifp, struct ifmediareq *ifmr) { vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; vxge_hal_device_t *hldev = vdev->devh; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; /* set link state */ if (vxge_hal_device_link_state_get(hldev) == VXGE_HAL_LINK_UP) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= vdev->ifm_optics | IFM_FDX; if_link_state_change(ifp, LINK_STATE_UP); } } /* * vxge_media_change * Media change driver callback */ int vxge_media_change(ifnet_t ifp) { vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; struct ifmedia *ifmediap = &vdev->media; return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER ? EINVAL : 0); } /* * Allocate PCI resources */ int vxge_alloc_resources(vxge_dev_t *vdev) { int err = 0; vxge_pci_info_t *pci_info = NULL; vxge_free_resources_e error_level = VXGE_FREE_NONE; device_t ndev = vdev->ndev; /* Allocate Buffer for HAL Device Configuration */ vdev->device_config = (vxge_hal_device_config_t *) vxge_mem_alloc(sizeof(vxge_hal_device_config_t)); if (!vdev->device_config) { err = ENOMEM; error_level = VXGE_DISABLE_PCI_BUSMASTER; device_printf(vdev->ndev, "failed to allocate memory for device config\n"); goto _exit0; } pci_info = (vxge_pci_info_t *) vxge_mem_alloc(sizeof(vxge_pci_info_t)); if (!pci_info) { error_level = VXGE_FREE_DEVICE_CONFIG; err = ENOMEM; device_printf(vdev->ndev, "failed to allocate memory for pci info\n"); goto _exit0; } pci_info->ndev = ndev; vdev->pdev = pci_info; err = vxge_alloc_bar_resources(vdev, 0); if (err != 0) { error_level = VXGE_FREE_BAR0; goto _exit0; } err = vxge_alloc_bar_resources(vdev, 1); if (err != 0) { error_level = VXGE_FREE_BAR1; goto _exit0; } err = vxge_alloc_bar_resources(vdev, 2); if (err != 0) error_level = VXGE_FREE_BAR2; _exit0: if (error_level) vxge_free_resources(ndev, error_level); return (err); } /* * vxge_alloc_bar_resources * Allocates BAR resources */ int vxge_alloc_bar_resources(vxge_dev_t *vdev, int i) { int err = 0; int res_id = 0; vxge_pci_info_t *pci_info = vdev->pdev; res_id = PCIR_BAR((i == 0) ? 
0 : (i * 2)); pci_info->bar_info[i] = bus_alloc_resource_any(vdev->ndev, SYS_RES_MEMORY, &res_id, RF_ACTIVE); if (pci_info->bar_info[i] == NULL) { device_printf(vdev->ndev, "failed to allocate memory for bus resources\n"); err = ENOMEM; goto _exit0; } pci_info->reg_map[i] = (vxge_bus_res_t *) vxge_mem_alloc(sizeof(vxge_bus_res_t)); if (pci_info->reg_map[i] == NULL) { device_printf(vdev->ndev, "failed to allocate memory bar resources\n"); err = ENOMEM; goto _exit0; } ((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_tag = rman_get_bustag(pci_info->bar_info[i]); ((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_handle = rman_get_bushandle(pci_info->bar_info[i]); ((vxge_bus_res_t *) (pci_info->reg_map[i]))->bar_start_addr = pci_info->bar_info[i]; ((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_res_len = rman_get_size(pci_info->bar_info[i]); _exit0: return (err); } /* * vxge_alloc_isr_resources */ int vxge_alloc_isr_resources(vxge_dev_t *vdev) { int i, err = 0, irq_rid; int msix_vec_reqd, intr_count, msix_count; int intr_mode = VXGE_HAL_INTR_MODE_IRQLINE; if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) { /* MSI-X messages supported by device */ intr_count = pci_msix_count(vdev->ndev); if (intr_count) { msix_vec_reqd = 4 * vdev->no_of_vpath; if (intr_count >= msix_vec_reqd) { intr_count = msix_vec_reqd; err = pci_alloc_msix(vdev->ndev, &intr_count); if (err == 0) intr_mode = VXGE_HAL_INTR_MODE_MSIX; } if ((err != 0) || (intr_count < msix_vec_reqd)) { device_printf(vdev->ndev, "Unable to allocate " "msi/x vectors switching to INTA mode\n"); } } } err = 0; vdev->intr_count = 0; vdev->config.intr_mode = intr_mode; switch (vdev->config.intr_mode) { case VXGE_HAL_INTR_MODE_IRQLINE: vdev->config.isr_info[0].irq_rid = 0; vdev->config.isr_info[0].irq_res = bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ, &vdev->config.isr_info[0].irq_rid, (RF_SHAREABLE | RF_ACTIVE)); if (vdev->config.isr_info[0].irq_res == NULL) { device_printf(vdev->ndev, "failed to allocate line interrupt resource\n"); err = ENOMEM; goto _exit0; } vdev->intr_count++; break; case VXGE_HAL_INTR_MODE_MSIX: msix_count = 0; for (i = 0; i < vdev->no_of_vpath; i++) { irq_rid = i * 4; vdev->config.isr_info[msix_count].irq_rid = irq_rid + 2; vdev->config.isr_info[msix_count].irq_res = bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ, &vdev->config.isr_info[msix_count].irq_rid, (RF_SHAREABLE | RF_ACTIVE)); if (vdev->config.isr_info[msix_count].irq_res == NULL) { device_printf(vdev->ndev, "allocating bus resource (rid %d) failed\n", vdev->config.isr_info[msix_count].irq_rid); err = ENOMEM; goto _exit0; } vdev->intr_count++; err = bus_bind_intr(vdev->ndev, vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus)); if (err != 0) break; msix_count++; } vdev->config.isr_info[msix_count].irq_rid = 3; vdev->config.isr_info[msix_count].irq_res = bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ, &vdev->config.isr_info[msix_count].irq_rid, (RF_SHAREABLE | RF_ACTIVE)); if (vdev->config.isr_info[msix_count].irq_res == NULL) { device_printf(vdev->ndev, "allocating bus resource (rid %d) failed\n", vdev->config.isr_info[msix_count].irq_rid); err = ENOMEM; goto _exit0; } vdev->intr_count++; err = bus_bind_intr(vdev->ndev, vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus)); break; } vdev->device_config->intr_mode = vdev->config.intr_mode; _exit0: return (err); } /* * vxge_free_resources * Undo what-all we did during load/attach */ void vxge_free_resources(device_t ndev, vxge_free_resources_e vxge_free_resource) { int i; vxge_dev_t *vdev; 
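/*
 * Resources are released in reverse order of acquisition; each case
 * deliberately falls through to the next, so VXGE_FREE_ALL unwinds the
 * whole attach, while a partial error level (for example VXGE_FREE_BAR2
 * set during a failed attach) releases only what was actually acquired.
 */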
vdev = (vxge_dev_t *) device_get_softc(ndev); switch (vxge_free_resource) { case VXGE_FREE_ALL: for (i = 0; i < vdev->intr_count; i++) { bus_teardown_intr(ndev, vdev->config.isr_info[i].irq_res, vdev->config.isr_info[i].irq_handle); } /* FALLTHROUGH */ case VXGE_FREE_INTERFACE: ether_ifdetach(vdev->ifp); bus_generic_detach(ndev); if_free(vdev->ifp); /* FALLTHROUGH */ case VXGE_FREE_MEDIA: ifmedia_removeall(&vdev->media); /* FALLTHROUGH */ case VXGE_FREE_MUTEX: vxge_mutex_destroy(vdev); /* FALLTHROUGH */ case VXGE_FREE_VPATH: vxge_mem_free(vdev->vpaths, vdev->no_of_vpath * sizeof(vxge_vpath_t)); /* FALLTHROUGH */ case VXGE_FREE_TERMINATE_DEVICE: if (vdev->devh != NULL) { vxge_hal_device_private_set(vdev->devh, 0); vxge_hal_device_terminate(vdev->devh); } /* FALLTHROUGH */ case VXGE_FREE_ISR_RESOURCE: vxge_free_isr_resources(vdev); /* FALLTHROUGH */ case VXGE_FREE_BAR2: vxge_free_bar_resources(vdev, 2); /* FALLTHROUGH */ case VXGE_FREE_BAR1: vxge_free_bar_resources(vdev, 1); /* FALLTHROUGH */ case VXGE_FREE_BAR0: vxge_free_bar_resources(vdev, 0); /* FALLTHROUGH */ case VXGE_FREE_PCI_INFO: vxge_mem_free(vdev->pdev, sizeof(vxge_pci_info_t)); /* FALLTHROUGH */ case VXGE_FREE_DEVICE_CONFIG: vxge_mem_free(vdev->device_config, sizeof(vxge_hal_device_config_t)); /* FALLTHROUGH */ case VXGE_DISABLE_PCI_BUSMASTER: pci_disable_busmaster(ndev); /* FALLTHROUGH */ case VXGE_FREE_TERMINATE_DRIVER: if (vxge_dev_ref_count) { --vxge_dev_ref_count; if (0 == vxge_dev_ref_count) vxge_hal_driver_terminate(); } /* FALLTHROUGH */ default: case VXGE_FREE_NONE: break; /* NOTREACHED */ } } void vxge_free_isr_resources(vxge_dev_t *vdev) { int i; switch (vdev->config.intr_mode) { case VXGE_HAL_INTR_MODE_IRQLINE: if (vdev->config.isr_info[0].irq_res) { bus_release_resource(vdev->ndev, SYS_RES_IRQ, vdev->config.isr_info[0].irq_rid, vdev->config.isr_info[0].irq_res); vdev->config.isr_info[0].irq_res = NULL; } break; case VXGE_HAL_INTR_MODE_MSIX: for (i = 0; i < vdev->intr_count; i++) { if (vdev->config.isr_info[i].irq_res) { bus_release_resource(vdev->ndev, SYS_RES_IRQ, vdev->config.isr_info[i].irq_rid, vdev->config.isr_info[i].irq_res); vdev->config.isr_info[i].irq_res = NULL; } } if (vdev->intr_count) pci_release_msi(vdev->ndev); break; } } void vxge_free_bar_resources(vxge_dev_t *vdev, int i) { int res_id = 0; vxge_pci_info_t *pci_info = vdev->pdev; res_id = PCIR_BAR((i == 0) ? 
0 : (i * 2)); if (pci_info->bar_info[i]) bus_release_resource(vdev->ndev, SYS_RES_MEMORY, res_id, pci_info->bar_info[i]); vxge_mem_free(pci_info->reg_map[i], sizeof(vxge_bus_res_t)); } /* * vxge_init_mutex * Initializes mutexes used in driver */ void vxge_mutex_init(vxge_dev_t *vdev) { int i; snprintf(vdev->mtx_drv_name, sizeof(vdev->mtx_drv_name), "%s_drv", vdev->ndev_name); mtx_init(&vdev->mtx_drv, vdev->mtx_drv_name, MTX_NETWORK_LOCK, MTX_DEF); for (i = 0; i < vdev->no_of_vpath; i++) { snprintf(vdev->vpaths[i].mtx_tx_name, sizeof(vdev->vpaths[i].mtx_tx_name), "%s_tx_%d", vdev->ndev_name, i); mtx_init(&vdev->vpaths[i].mtx_tx, vdev->vpaths[i].mtx_tx_name, NULL, MTX_DEF); } } /* * vxge_mutex_destroy * Destroys mutexes used in driver */ void vxge_mutex_destroy(vxge_dev_t *vdev) { int i; for (i = 0; i < vdev->no_of_vpath; i++) VXGE_TX_LOCK_DESTROY(&(vdev->vpaths[i])); VXGE_DRV_LOCK_DESTROY(vdev); } /* * vxge_rth_config */ vxge_hal_status_e vxge_rth_config(vxge_dev_t *vdev) { int i; vxge_hal_vpath_h vpath_handle; vxge_hal_rth_hash_types_t hash_types; vxge_hal_status_e status = VXGE_HAL_OK; u8 mtable[256] = {0}; /* Filling matable with bucket-to-vpath mapping */ vdev->config.rth_bkt_sz = VXGE_DEFAULT_RTH_BUCKET_SIZE; for (i = 0; i < (1 << vdev->config.rth_bkt_sz); i++) mtable[i] = i % vdev->no_of_vpath; /* Fill RTH hash types */ hash_types.hash_type_tcpipv4_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV4; hash_types.hash_type_tcpipv6_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6; hash_types.hash_type_tcpipv6ex_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6_EX; hash_types.hash_type_ipv4_en = VXGE_HAL_RING_HASH_TYPE_IPV4; hash_types.hash_type_ipv6_en = VXGE_HAL_RING_HASH_TYPE_IPV6; hash_types.hash_type_ipv6ex_en = VXGE_HAL_RING_HASH_TYPE_IPV6_EX; /* set indirection table, bucket-to-vpath mapping */ status = vxge_hal_vpath_rts_rth_itable_set(vdev->vpath_handles, vdev->no_of_vpath, mtable, ((u32) (1 << vdev->config.rth_bkt_sz))); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "rth configuration failed\n"); goto _exit0; } for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; status = vxge_hal_vpath_rts_rth_set(vpath_handle, RTH_ALG_JENKINS, &hash_types, vdev->config.rth_bkt_sz, TRUE); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "rth configuration failed for vpath (%d)\n", vdev->vpaths[i].vp_id); break; } } _exit0: return (status); } /* * vxge_vpath_config * Sets HAL parameter values from kenv */ void vxge_vpath_config(vxge_dev_t *vdev) { int i; u32 no_of_vpath = 0; vxge_hal_vp_config_t *vp_config; vxge_hal_device_config_t *device_config = vdev->device_config; device_config->debug_level = VXGE_TRACE; device_config->debug_mask = VXGE_COMPONENT_ALL; device_config->device_poll_millis = VXGE_DEFAULT_DEVICE_POLL_MILLIS; vdev->config.no_of_vpath = min(vdev->config.no_of_vpath, vdev->max_supported_vpath); for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) { vp_config = &(device_config->vp_config[i]); vp_config->fifo.enable = VXGE_HAL_FIFO_DISABLE; vp_config->ring.enable = VXGE_HAL_RING_DISABLE; } for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) { if (no_of_vpath >= vdev->config.no_of_vpath) break; if (!bVAL1(vdev->config.hw_info.vpath_mask, i)) continue; no_of_vpath++; vp_config = &(device_config->vp_config[i]); vp_config->mtu = VXGE_HAL_DEFAULT_MTU; vp_config->ring.enable = VXGE_HAL_RING_ENABLE; vp_config->ring.post_mode = VXGE_HAL_RING_POST_MODE_DOORBELL; vp_config->ring.buffer_mode = VXGE_HAL_RING_RXD_BUFFER_MODE_1; vp_config->ring.ring_length = 
vxge_ring_length_get(VXGE_HAL_RING_RXD_BUFFER_MODE_1); vp_config->ring.scatter_mode = VXGE_HAL_RING_SCATTER_MODE_A; vp_config->rpa_all_vid_en = VXGE_DEFAULT_ALL_VID_ENABLE; vp_config->rpa_strip_vlan_tag = VXGE_DEFAULT_STRIP_VLAN_TAG; vp_config->rpa_ucast_all_addr_en = VXGE_HAL_VPATH_RPA_UCAST_ALL_ADDR_DISABLE; vp_config->rti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE; vp_config->rti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE; vp_config->rti.util_sel = VXGE_HAL_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; vp_config->rti.uec_a = VXGE_DEFAULT_RTI_RX_UFC_A; vp_config->rti.uec_b = VXGE_DEFAULT_RTI_RX_UFC_B; vp_config->rti.uec_c = VXGE_DEFAULT_RTI_RX_UFC_C; vp_config->rti.uec_d = VXGE_DEFAULT_RTI_RX_UFC_D; vp_config->rti.urange_a = VXGE_DEFAULT_RTI_RX_URANGE_A; vp_config->rti.urange_b = VXGE_DEFAULT_RTI_RX_URANGE_B; vp_config->rti.urange_c = VXGE_DEFAULT_RTI_RX_URANGE_C; vp_config->rti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE; vp_config->rti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE; vp_config->rti.btimer_val = (VXGE_DEFAULT_RTI_BTIMER_VAL * 1000) / 272; vp_config->rti.rtimer_val = (VXGE_DEFAULT_RTI_RTIMER_VAL * 1000) / 272; vp_config->rti.ltimer_val = (VXGE_DEFAULT_RTI_LTIMER_VAL * 1000) / 272; if ((no_of_vpath > 1) && (VXGE_DEFAULT_CONFIG_MQ_ENABLE == 0)) continue; vp_config->fifo.enable = VXGE_HAL_FIFO_ENABLE; vp_config->fifo.max_aligned_frags = VXGE_DEFAULT_FIFO_ALIGNED_FRAGS; vp_config->tti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE; vp_config->tti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE; vp_config->tti.util_sel = VXGE_HAL_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; vp_config->tti.uec_a = VXGE_DEFAULT_TTI_TX_UFC_A; vp_config->tti.uec_b = VXGE_DEFAULT_TTI_TX_UFC_B; vp_config->tti.uec_c = VXGE_DEFAULT_TTI_TX_UFC_C; vp_config->tti.uec_d = VXGE_DEFAULT_TTI_TX_UFC_D; vp_config->tti.urange_a = VXGE_DEFAULT_TTI_TX_URANGE_A; vp_config->tti.urange_b = VXGE_DEFAULT_TTI_TX_URANGE_B; vp_config->tti.urange_c = VXGE_DEFAULT_TTI_TX_URANGE_C; vp_config->tti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE; vp_config->tti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE; vp_config->tti.btimer_val = (VXGE_DEFAULT_TTI_BTIMER_VAL * 1000) / 272; vp_config->tti.rtimer_val = (VXGE_DEFAULT_TTI_RTIMER_VAL * 1000) / 272; vp_config->tti.ltimer_val = (VXGE_DEFAULT_TTI_LTIMER_VAL * 1000) / 272; } vdev->no_of_vpath = no_of_vpath; if (vdev->no_of_vpath == 1) vdev->config.tx_steering = 0; if (vdev->config.rth_enable && (vdev->no_of_vpath > 1)) { device_config->rth_en = VXGE_HAL_RTH_ENABLE; device_config->rth_it_type = VXGE_HAL_RTH_IT_TYPE_MULTI_IT; } vdev->config.rth_enable = device_config->rth_en; } /* * vxge_vpath_cb_fn * Virtual path Callback function */ /* ARGSUSED */ static vxge_hal_status_e vxge_vpath_cb_fn(vxge_hal_client_h client_handle, vxge_hal_up_msg_h msgh, vxge_hal_message_type_e msg_type, vxge_hal_obj_id_t obj_id, vxge_hal_result_e result, vxge_hal_opaque_handle_t *opaque_handle) { return (VXGE_HAL_OK); } /* * vxge_vpath_open */ int vxge_vpath_open(vxge_dev_t *vdev) { int i, err = EINVAL; u64 func_id; vxge_vpath_t *vpath; vxge_hal_vpath_attr_t vpath_attr; vxge_hal_status_e status = VXGE_HAL_OK; struct lro_ctrl *lro = NULL; bzero(&vpath_attr, sizeof(vxge_hal_vpath_attr_t)); for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); lro = &vpath->lro; /* Vpath vpath_attr: FIFO */ vpath_attr.vp_id = vpath->vp_id; vpath_attr.fifo_attr.callback = vxge_tx_compl; vpath_attr.fifo_attr.txdl_init = vxge_tx_replenish; vpath_attr.fifo_attr.txdl_term = vxge_tx_term; vpath_attr.fifo_attr.userdata = vpath; vpath_attr.fifo_attr.per_txdl_space = 
sizeof(vxge_txdl_priv_t); /* Vpath vpath_attr: Ring */ vpath_attr.ring_attr.callback = vxge_rx_compl; vpath_attr.ring_attr.rxd_init = vxge_rx_replenish; vpath_attr.ring_attr.rxd_term = vxge_rx_term; vpath_attr.ring_attr.userdata = vpath; vpath_attr.ring_attr.per_rxd_space = sizeof(vxge_rxd_priv_t); err = vxge_dma_tags_create(vpath); if (err != 0) { device_printf(vdev->ndev, "failed to create dma tags\n"); break; } #if __FreeBSD_version >= 800000 vpath->br = buf_ring_alloc(VXGE_DEFAULT_BR_SIZE, M_DEVBUF, M_WAITOK, &vpath->mtx_tx); if (vpath->br == NULL) { err = ENOMEM; break; } #endif status = vxge_hal_vpath_open(vdev->devh, &vpath_attr, (vxge_hal_vpath_callback_f) vxge_vpath_cb_fn, NULL, &vpath->handle); if (status != VXGE_HAL_OK) { device_printf(vdev->ndev, "failed to open vpath (%d)\n", vpath->vp_id); err = EPERM; break; } vpath->is_open = TRUE; vdev->vpath_handles[i] = vpath->handle; vpath->tx_ticks = ticks; vpath->rx_ticks = ticks; vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL; vpath->rti_rtimer_val = VXGE_DEFAULT_RTI_RTIMER_VAL; vpath->tx_intr_coalesce = vdev->config.intr_coalesce; vpath->rx_intr_coalesce = vdev->config.intr_coalesce; func_id = vdev->config.hw_info.func_id; if (vdev->config.low_latency && (vdev->config.bw_info[func_id].priority == VXGE_DEFAULT_VPATH_PRIORITY_HIGH)) { vpath->tx_intr_coalesce = 0; } if (vdev->ifp->if_capenable & IFCAP_LRO) { err = tcp_lro_init(lro); if (err != 0) { device_printf(vdev->ndev, "LRO Initialization failed!\n"); break; } vpath->lro_enable = TRUE; lro->ifp = vdev->ifp; } } return (err); } void vxge_tso_config(vxge_dev_t *vdev) { u32 func_id, priority; vxge_hal_status_e status = VXGE_HAL_OK; vdev->ifp->if_capabilities |= IFCAP_TSO4; status = vxge_bw_priority_get(vdev, NULL); if (status == VXGE_HAL_OK) { func_id = vdev->config.hw_info.func_id; priority = vdev->config.bw_info[func_id].priority; if (priority != VXGE_DEFAULT_VPATH_PRIORITY_HIGH) vdev->ifp->if_capabilities &= ~IFCAP_TSO4; } #if __FreeBSD_version >= 800000 if (vdev->ifp->if_capabilities & IFCAP_TSO4) vdev->ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif } vxge_hal_status_e vxge_bw_priority_get(vxge_dev_t *vdev, vxge_bw_info_t *bw_info) { u32 priority, bandwidth; u32 vpath_count; u64 func_id, func_mode, vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS]; vxge_hal_status_e status = VXGE_HAL_OK; func_id = vdev->config.hw_info.func_id; if (bw_info) { func_id = bw_info->func_id; func_mode = vdev->config.hw_info.function_mode; if ((is_single_func(func_mode)) && (func_id > 0)) return (VXGE_HAL_FAIL); } if (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)) { status = vxge_hal_vf_rx_bw_get(vdev->devh, func_id, &bandwidth, &priority); } else { status = vxge_hal_get_vpath_list(vdev->devh, func_id, vpath_list, &vpath_count); if (status == VXGE_HAL_OK) { status = vxge_hal_bw_priority_get(vdev->devh, vpath_list[0], &bandwidth, &priority); } } if (status == VXGE_HAL_OK) { if (bw_info) { bw_info->priority = priority; bw_info->bandwidth = bandwidth; } else { vdev->config.bw_info[func_id].priority = priority; vdev->config.bw_info[func_id].bandwidth = bandwidth; } } return (status); } /* * close vpaths */ void vxge_vpath_close(vxge_dev_t *vdev) { int i; vxge_vpath_t *vpath; for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &(vdev->vpaths[i]); if (vpath->handle) vxge_hal_vpath_close(vpath->handle); #if __FreeBSD_version >= 800000 if (vpath->br != NULL) buf_ring_free(vpath->br, M_DEVBUF); #endif /* Free LRO memory */ if (vpath->lro_enable) tcp_lro_free(&vpath->lro); if (vpath->dma_tag_rx) { 
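			/*
			 * Destroy the extra receive DMA map allocated in
			 * vxge_dma_tags_create() before tearing down the tag
			 * it was created from.
			 */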
bus_dmamap_destroy(vpath->dma_tag_rx, vpath->extra_dma_map); bus_dma_tag_destroy(vpath->dma_tag_rx); } if (vpath->dma_tag_tx) bus_dma_tag_destroy(vpath->dma_tag_tx); vpath->handle = NULL; vpath->is_open = FALSE; } } /* * reset vpaths */ void vxge_vpath_reset(vxge_dev_t *vdev) { int i; vxge_hal_vpath_h vpath_handle; vxge_hal_status_e status = VXGE_HAL_OK; for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; status = vxge_hal_vpath_reset(vpath_handle); if (status != VXGE_HAL_OK) device_printf(vdev->ndev, "failed to reset vpath :%d\n", i); } } static inline int vxge_vpath_get(vxge_dev_t *vdev, mbuf_t mhead) { struct tcphdr *th = NULL; struct udphdr *uh = NULL; struct ip *ip = NULL; struct ip6_hdr *ip6 = NULL; struct ether_vlan_header *eth = NULL; void *ulp = NULL; int ehdrlen, iphlen = 0; u8 ipproto = 0; u16 etype, src_port, dst_port; u16 queue_len, counter = 0; src_port = dst_port = 0; queue_len = vdev->no_of_vpath; eth = mtod(mhead, struct ether_vlan_header *); if (eth->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eth->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eth->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { case ETHERTYPE_IP: ip = (struct ip *) (mhead->m_data + ehdrlen); iphlen = ip->ip_hl << 2; ipproto = ip->ip_p; th = (struct tcphdr *) ((caddr_t)ip + iphlen); uh = (struct udphdr *) ((caddr_t)ip + iphlen); break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *) (mhead->m_data + ehdrlen); iphlen = sizeof(struct ip6_hdr); ipproto = ip6->ip6_nxt; ulp = mtod(mhead, char *) + iphlen; th = ((struct tcphdr *) (ulp)); uh = ((struct udphdr *) (ulp)); break; default: break; } switch (ipproto) { case IPPROTO_TCP: src_port = th->th_sport; dst_port = th->th_dport; break; case IPPROTO_UDP: src_port = uh->uh_sport; dst_port = uh->uh_dport; break; default: break; } counter = (ntohs(src_port) + ntohs(dst_port)) & vpath_selector[queue_len - 1]; if (counter >= queue_len) counter = queue_len - 1; return (counter); } static inline vxge_hal_vpath_h vxge_vpath_handle_get(vxge_dev_t *vdev, int i) { return (vdev->vpaths[i].is_open ? 
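	    /* Hand out the HAL handle only if this vpath was opened successfully. */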
vdev->vpaths[i].handle : NULL); } int vxge_firmware_verify(vxge_dev_t *vdev) { int err = 0; u64 active_config; vxge_hal_status_e status = VXGE_HAL_FAIL; if (vdev->fw_upgrade) { status = vxge_firmware_upgrade(vdev); if (status == VXGE_HAL_OK) { err = ENXIO; goto _exit0; } } if ((vdev->config.function_mode != VXGE_DEFAULT_CONFIG_VALUE) && (vdev->config.hw_info.function_mode != (u64) vdev->config.function_mode)) { status = vxge_func_mode_set(vdev); if (status == VXGE_HAL_OK) err = ENXIO; } /* l2_switch configuration */ active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_L2SwitchEnabled, &active_config); if (status == VXGE_HAL_OK) { vdev->l2_switch = active_config; if (vdev->config.l2_switch != VXGE_DEFAULT_CONFIG_VALUE) { if (vdev->config.l2_switch != active_config) { status = vxge_l2switch_mode_set(vdev); if (status == VXGE_HAL_OK) err = ENXIO; } } } if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) { if (vxge_port_mode_update(vdev) == ENXIO) err = ENXIO; } _exit0: if (err == ENXIO) device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n"); return (err); } vxge_hal_status_e vxge_firmware_upgrade(vxge_dev_t *vdev) { u8 *fw_buffer; u32 fw_size; vxge_hal_device_hw_info_t *hw_info; vxge_hal_status_e status = VXGE_HAL_OK; hw_info = &vdev->config.hw_info; fw_size = sizeof(VXGE_FW_ARRAY_NAME); fw_buffer = (u8 *) VXGE_FW_ARRAY_NAME; device_printf(vdev->ndev, "Current firmware version : %s (%s)\n", hw_info->fw_version.version, hw_info->fw_date.date); device_printf(vdev->ndev, "Upgrading firmware to %d.%d.%d\n", VXGE_MIN_FW_MAJOR_VERSION, VXGE_MIN_FW_MINOR_VERSION, VXGE_MIN_FW_BUILD_NUMBER); /* Call HAL API to upgrade firmware */ status = vxge_hal_mrpcim_fw_upgrade(vdev->pdev, (pci_reg_h) vdev->pdev->reg_map[0], (u8 *) vdev->pdev->bar_info[0], fw_buffer, fw_size); device_printf(vdev->ndev, "firmware upgrade %s\n", (status == VXGE_HAL_OK) ? "successful" : "failed"); return (status); } vxge_hal_status_e vxge_func_mode_set(vxge_dev_t *vdev) { u64 active_config; vxge_hal_status_e status = VXGE_HAL_FAIL; status = vxge_hal_mrpcim_pcie_func_mode_set(vdev->devh, vdev->config.function_mode); device_printf(vdev->ndev, "function mode change %s\n", (status == VXGE_HAL_OK) ? "successful" : "failed"); if (status == VXGE_HAL_OK) { vxge_hal_set_fw_api(vdev->devh, 0ULL, VXGE_HAL_API_FUNC_MODE_COMMIT, 0, 0ULL, 0ULL); vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode, &active_config); /* * If in MF + DP mode * if user changes to SF, change port_mode to single port mode */ if (((is_multi_func(vdev->config.hw_info.function_mode)) && is_single_func(vdev->config.function_mode)) && (active_config == VXGE_HAL_DP_NP_MODE_DUAL_PORT)) { vdev->config.port_mode = VXGE_HAL_DP_NP_MODE_SINGLE_PORT; status = vxge_port_mode_set(vdev); } } return (status); } vxge_hal_status_e vxge_port_mode_set(vxge_dev_t *vdev) { vxge_hal_status_e status = VXGE_HAL_FAIL; status = vxge_hal_set_port_mode(vdev->devh, vdev->config.port_mode); device_printf(vdev->ndev, "port mode change %s\n", (status == VXGE_HAL_OK) ? "successful" : "failed"); if (status == VXGE_HAL_OK) { vxge_hal_set_fw_api(vdev->devh, 0ULL, VXGE_HAL_API_FUNC_MODE_COMMIT, 0, 0ULL, 0ULL); /* Configure vpath_mapping for active-active mode only */ if (vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) { status = vxge_hal_config_vpath_map(vdev->devh, VXGE_DUAL_PORT_MAP); device_printf(vdev->ndev, "dual port map change %s\n", (status == VXGE_HAL_OK) ? 
"successful" : "failed"); } } return (status); } int vxge_port_mode_update(vxge_dev_t *vdev) { int err = 0; u64 active_config; vxge_hal_status_e status = VXGE_HAL_FAIL; if ((vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) && is_single_func(vdev->config.hw_info.function_mode)) { device_printf(vdev->ndev, "Adapter in SF mode, dual port mode is not allowed\n"); err = EPERM; goto _exit0; } active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode, &active_config); if (status != VXGE_HAL_OK) { err = EINVAL; goto _exit0; } vdev->port_mode = active_config; if (vdev->config.port_mode != VXGE_DEFAULT_CONFIG_VALUE) { if (vdev->config.port_mode != vdev->port_mode) { status = vxge_port_mode_set(vdev); if (status != VXGE_HAL_OK) { err = EINVAL; goto _exit0; } err = ENXIO; vdev->port_mode = vdev->config.port_mode; } } active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail, &active_config); if (status != VXGE_HAL_OK) { err = EINVAL; goto _exit0; } vdev->port_failure = active_config; /* * active/active mode : set to NoMove * active/passive mode: set to Failover-Failback */ if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) vdev->config.port_failure = VXGE_HAL_XMAC_NWIF_OnFailure_NoMove; else if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_ACTIVE_PASSIVE) vdev->config.port_failure = VXGE_HAL_XMAC_NWIF_OnFailure_OtherPortBackOnRestore; if ((vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) && (vdev->config.port_failure != vdev->port_failure)) { status = vxge_port_behavior_on_failure_set(vdev); if (status == VXGE_HAL_OK) err = ENXIO; } _exit0: return (err); } vxge_hal_status_e vxge_port_mode_get(vxge_dev_t *vdev, vxge_port_info_t *port_info) { int err = 0; u64 active_config; vxge_hal_status_e status = VXGE_HAL_FAIL; active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode, &active_config); if (status != VXGE_HAL_OK) { err = ENXIO; goto _exit0; } port_info->port_mode = active_config; active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail, &active_config); if (status != VXGE_HAL_OK) { err = ENXIO; goto _exit0; } port_info->port_failure = active_config; _exit0: return (err); } vxge_hal_status_e vxge_port_behavior_on_failure_set(vxge_dev_t *vdev) { vxge_hal_status_e status = VXGE_HAL_FAIL; status = vxge_hal_set_behavior_on_failure(vdev->devh, vdev->config.port_failure); device_printf(vdev->ndev, "port behaviour on failure change %s\n", (status == VXGE_HAL_OK) ? "successful" : "failed"); if (status == VXGE_HAL_OK) vxge_hal_set_fw_api(vdev->devh, 0ULL, VXGE_HAL_API_FUNC_MODE_COMMIT, 0, 0ULL, 0ULL); return (status); } void vxge_active_port_update(vxge_dev_t *vdev) { u64 active_config; vxge_hal_status_e status = VXGE_HAL_FAIL; active_config = VXGE_DEFAULT_CONFIG_VALUE; status = vxge_hal_get_active_config(vdev->devh, VXGE_HAL_XMAC_NWIF_ActConfig_ActivePort, &active_config); if (status == VXGE_HAL_OK) vdev->active_port = active_config; } vxge_hal_status_e vxge_l2switch_mode_set(vxge_dev_t *vdev) { vxge_hal_status_e status = VXGE_HAL_FAIL; status = vxge_hal_set_l2switch_mode(vdev->devh, vdev->config.l2_switch); device_printf(vdev->ndev, "L2 switch %s\n", (status == VXGE_HAL_OK) ? (vdev->config.l2_switch) ? 
"enable" : "disable" : "change failed"); if (status == VXGE_HAL_OK) vxge_hal_set_fw_api(vdev->devh, 0ULL, VXGE_HAL_API_FUNC_MODE_COMMIT, 0, 0ULL, 0ULL); return (status); } /* * vxge_promisc_set * Enable Promiscuous Mode */ void vxge_promisc_set(vxge_dev_t *vdev) { int i; ifnet_t ifp; vxge_hal_vpath_h vpath_handle; if (!vdev->is_initialized) return; ifp = vdev->ifp; for (i = 0; i < vdev->no_of_vpath; i++) { vpath_handle = vxge_vpath_handle_get(vdev, i); if (!vpath_handle) continue; if (ifp->if_flags & IFF_PROMISC) vxge_hal_vpath_promisc_enable(vpath_handle); else vxge_hal_vpath_promisc_disable(vpath_handle); } } /* * vxge_change_mtu * Change interface MTU to a requested valid size */ int vxge_change_mtu(vxge_dev_t *vdev, unsigned long new_mtu) { int err = EINVAL; if ((new_mtu < VXGE_HAL_MIN_MTU) || (new_mtu > VXGE_HAL_MAX_MTU)) goto _exit0; (vdev->ifp)->if_mtu = new_mtu; device_printf(vdev->ndev, "MTU changed to %u\n", (vdev->ifp)->if_mtu); if (vdev->is_initialized) { if_down(vdev->ifp); vxge_reset(vdev); if_up(vdev->ifp); } err = 0; _exit0: return (err); } /* * Creates DMA tags for both Tx and Rx */ int vxge_dma_tags_create(vxge_vpath_t *vpath) { int err = 0; bus_size_t max_size, boundary; vxge_dev_t *vdev = vpath->vdev; ifnet_t ifp = vdev->ifp; max_size = ifp->if_mtu + VXGE_HAL_MAC_HEADER_MAX_SIZE + VXGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN; VXGE_BUFFER_ALIGN(max_size, 128) if (max_size <= MCLBYTES) vdev->rx_mbuf_sz = MCLBYTES; else vdev->rx_mbuf_sz = (max_size > MJUMPAGESIZE) ? MJUM9BYTES : MJUMPAGESIZE; boundary = (max_size > PAGE_SIZE) ? 0 : PAGE_SIZE; /* DMA tag for Tx */ err = bus_dma_tag_create( bus_get_dma_tag(vdev->ndev), 1, PAGE_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, VXGE_TSO_SIZE, VXGE_MAX_SEGS, PAGE_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &(vpath->dma_tag_tx)); if (err != 0) goto _exit0; /* DMA tag for Rx */ err = bus_dma_tag_create( bus_get_dma_tag(vdev->ndev), 1, boundary, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, vdev->rx_mbuf_sz, 1, vdev->rx_mbuf_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &(vpath->dma_tag_rx)); if (err != 0) goto _exit1; /* Create DMA map for this descriptor */ err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT, &vpath->extra_dma_map); if (err == 0) goto _exit0; bus_dma_tag_destroy(vpath->dma_tag_rx); _exit1: bus_dma_tag_destroy(vpath->dma_tag_tx); _exit0: return (err); } static inline int vxge_dma_mbuf_coalesce(bus_dma_tag_t dma_tag_tx, bus_dmamap_t dma_map, mbuf_t * m_headp, bus_dma_segment_t * dma_buffers, int *num_segs) { int err = 0; mbuf_t mbuf_pkt = NULL; retry: err = bus_dmamap_load_mbuf_sg(dma_tag_tx, dma_map, *m_headp, dma_buffers, num_segs, BUS_DMA_NOWAIT); if (err == EFBIG) { /* try to defrag, too many segments */ mbuf_pkt = m_defrag(*m_headp, M_NOWAIT); if (mbuf_pkt == NULL) { err = ENOBUFS; goto _exit0; } *m_headp = mbuf_pkt; goto retry; } _exit0: return (err); } int vxge_device_hw_info_get(vxge_dev_t *vdev) { int i, err = ENXIO; u64 vpath_mask = 0; u32 max_supported_vpath = 0; u32 fw_ver_maj_min; vxge_firmware_upgrade_e fw_option; vxge_hal_status_e status = VXGE_HAL_OK; vxge_hal_device_hw_info_t *hw_info; status = vxge_hal_device_hw_info_get(vdev->pdev, (pci_reg_h) vdev->pdev->reg_map[0], (u8 *) vdev->pdev->bar_info[0], &vdev->config.hw_info); if (status != VXGE_HAL_OK) goto _exit0; hw_info = &vdev->config.hw_info; vpath_mask = hw_info->vpath_mask; if (vpath_mask == 0) { device_printf(vdev->ndev, "No vpaths available in device\n"); goto _exit0; } fw_option = vdev->config.fw_option; /* Check how many vpaths are available */ 
for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) { if (!((vpath_mask) & mBIT(i))) continue; max_supported_vpath++; } vdev->max_supported_vpath = max_supported_vpath; status = vxge_hal_device_is_privileged(hw_info->host_type, hw_info->func_id); vdev->is_privilaged = (status == VXGE_HAL_OK) ? TRUE : FALSE; vdev->hw_fw_version = VXGE_FW_VERSION( hw_info->fw_version.major, hw_info->fw_version.minor, hw_info->fw_version.build); fw_ver_maj_min = VXGE_FW_MAJ_MIN_VERSION(hw_info->fw_version.major, hw_info->fw_version.minor); if ((fw_option >= VXGE_FW_UPGRADE_FORCE) || (vdev->hw_fw_version != VXGE_DRV_FW_VERSION)) { /* For fw_ver 1.8.1 and above ignore build number. */ if ((fw_option == VXGE_FW_UPGRADE_ALL) && ((vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 1)) && (fw_ver_maj_min == VXGE_DRV_FW_MAJ_MIN_VERSION))) { goto _exit1; } if (vdev->hw_fw_version < VXGE_BASE_FW_VERSION) { device_printf(vdev->ndev, "Upgrade driver through vxge_update, " "Unable to load the driver.\n"); goto _exit0; } vdev->fw_upgrade = TRUE; } _exit1: err = 0; _exit0: return (err); } /* * vxge_device_hw_info_print * Print device and driver information */ void vxge_device_hw_info_print(vxge_dev_t *vdev) { u32 i; device_t ndev; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; char pmd_type[2][VXGE_PMD_INFO_LEN]; vxge_hal_device_t *hldev; vxge_hal_device_hw_info_t *hw_info; vxge_hal_device_pmd_info_t *pmd_port; hldev = vdev->devh; ndev = vdev->ndev; ctx = device_get_sysctl_ctx(ndev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(ndev)); hw_info = &(vdev->config.hw_info); snprintf(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION], sizeof(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]), "%d.%d.%d.%d", XGELL_VERSION_MAJOR, XGELL_VERSION_MINOR, XGELL_VERSION_FIX, XGELL_VERSION_BUILD); /* Print PCI-e bus type/speed/width info */ snprintf(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO], sizeof(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]), "x%d", hldev->link_width); if (hldev->link_width <= VXGE_HAL_PCI_E_LINK_WIDTH_X4) device_printf(ndev, "For optimal performance a x8 " "PCI-Express slot is required.\n"); vxge_null_terminate((char *) hw_info->serial_number, sizeof(hw_info->serial_number)); vxge_null_terminate((char *) hw_info->part_number, sizeof(hw_info->part_number)); snprintf(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO], sizeof(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]), "%s", hw_info->serial_number); snprintf(vdev->config.nic_attr[VXGE_PRINT_PART_NO], sizeof(vdev->config.nic_attr[VXGE_PRINT_PART_NO]), "%s", hw_info->part_number); snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION], sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]), "%s", hw_info->fw_version.version); snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_DATE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_DATE]), "%s", hw_info->fw_date.date); pmd_port = &(hw_info->pmd_port0); for (i = 0; i < hw_info->ports; i++) { vxge_pmd_port_type_get(vdev, pmd_port->type, pmd_type[i], sizeof(pmd_type[i])); strncpy(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i], "vendor=??, sn=??, pn=??, type=??", sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i])); vxge_null_terminate(pmd_port->vendor, sizeof(pmd_port->vendor)); if (strlen(pmd_port->vendor) == 0) { pmd_port = &(hw_info->pmd_port1); continue; } vxge_null_terminate(pmd_port->ser_num, sizeof(pmd_port->ser_num)); vxge_null_terminate(pmd_port->part_num, sizeof(pmd_port->part_num)); snprintf(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i], sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]), "vendor=%s, sn=%s, 
pn=%s, type=%s", pmd_port->vendor, pmd_port->ser_num, pmd_port->part_num, pmd_type[i]); pmd_port = &(hw_info->pmd_port1); } switch (hw_info->function_mode) { case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17: snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), "%s %d %s", "Single Function - 1 function(s)", vdev->max_supported_vpath, "VPath(s)/function"); break; case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8: snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), "%s %d %s", "Multi Function - 2 function(s)", vdev->max_supported_vpath, "VPath(s)/function"); break; case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4: snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), "%s %d %s", "Multi Function - 4 function(s)", vdev->max_supported_vpath, "VPath(s)/function"); break; case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2: snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), "%s %d %s", "Multi Function - 8 function(s)", vdev->max_supported_vpath, "VPath(s)/function"); break; case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2: snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), "%s %d %s", "Multi Function (DirectIO) - 8 function(s)", vdev->max_supported_vpath, "VPath(s)/function"); break; } snprintf(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]), "%s", ((vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) ? "MSI-X" : "INTA")); snprintf(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT], sizeof(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]), "%d", vdev->no_of_vpath); snprintf(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE], sizeof(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]), "%u", vdev->ifp->if_mtu); snprintf(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]), "%s", ((vdev->config.lro_enable) ? "Enabled" : "Disabled")); snprintf(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]), "%s", ((vdev->config.rth_enable) ? "Enabled" : "Disabled")); snprintf(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]), "%s", ((vdev->ifp->if_capenable & IFCAP_TSO4) ? "Enabled" : "Disabled")); snprintf(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE], sizeof(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]), "%s", ((hw_info->ports == 1) ? "Single Port" : "Dual Port")); if (vdev->is_privilaged) { if (hw_info->ports > 1) { snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]), "%s", vxge_port_mode[vdev->port_mode]); if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE], sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]), "%s", vxge_port_failure[vdev->port_failure]); vxge_active_port_update(vdev); snprintf(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT], sizeof(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]), "%lld", vdev->active_port); } if (!is_single_func(hw_info->function_mode)) { snprintf(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE], sizeof(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]), "%s", ((vdev->l2_switch) ? 
"Enabled" : "Disabled")); } } device_printf(ndev, "Driver version\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]); device_printf(ndev, "Serial number\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]); device_printf(ndev, "Part number\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PART_NO]); device_printf(ndev, "Firmware version\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]); device_printf(ndev, "Firmware date\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_FW_DATE]); device_printf(ndev, "Link width\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]); if (vdev->is_privilaged) { device_printf(ndev, "Function mode\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]); } device_printf(ndev, "Interrupt type\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]); device_printf(ndev, "VPath(s) opened\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]); device_printf(ndev, "Adapter Type\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]); device_printf(ndev, "PMD Port 0\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0]); if (hw_info->ports > 1) { device_printf(ndev, "PMD Port 1\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1]); if (vdev->is_privilaged) { device_printf(ndev, "Port Mode\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]); if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) device_printf(ndev, "Port Failure\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]); device_printf(vdev->ndev, "Active Port\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]); } } if (vdev->is_privilaged && !is_single_func(hw_info->function_mode)) { device_printf(vdev->ndev, "L2 Switch\t: %s\n", vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]); } device_printf(ndev, "MTU is %s\n", vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]); device_printf(ndev, "LRO %s\n", vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]); device_printf(ndev, "RTH %s\n", vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]); device_printf(ndev, "TSO %s\n", vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver version", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION], 0, "Driver version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Serial number", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO], 0, "Serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Part number", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PART_NO], 0, "Part number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Firmware version", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_FW_VERSION], 0, "Firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Firmware date", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_FW_DATE], 0, "Firmware date"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Link width", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO], 0, "Link width"); if (vdev->is_privilaged) { SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Function mode", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 0, "Function mode"); } SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Interrupt type", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_INTR_MODE], 0, "Interrupt type"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "VPath(s) opened", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT], 0, "VPath(s) opened"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Adapter Type", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE], 0, "Adapter Type"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pmd port 0", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0], 
0, "pmd port"); if (hw_info->ports > 1) { SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pmd port 1", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1], 0, "pmd port"); if (vdev->is_privilaged) { SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Port Mode", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PORT_MODE], 0, "Port Mode"); if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Port Failure", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE], 0, "Port Failure"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "L2 Switch", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE], 0, "L2 Switch"); } } SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "LRO mode", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_LRO_MODE], 0, "LRO mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "RTH mode", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_RTH_MODE], 0, "RTH mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "TSO mode", CTLFLAG_RD, vdev->config.nic_attr[VXGE_PRINT_TSO_MODE], 0, "TSO mode"); } void vxge_pmd_port_type_get(vxge_dev_t *vdev, u32 port_type, char *ifm_name, u8 ifm_len) { vdev->ifm_optics = IFM_UNKNOWN; switch (port_type) { case VXGE_HAL_DEVICE_PMD_TYPE_10G_SR: vdev->ifm_optics = IFM_10G_SR; strlcpy(ifm_name, "10GbE SR", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_LR: vdev->ifm_optics = IFM_10G_LR; strlcpy(ifm_name, "10GbE LR", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_LRM: vdev->ifm_optics = IFM_10G_LRM; strlcpy(ifm_name, "10GbE LRM", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_DIRECT: vdev->ifm_optics = IFM_10G_TWINAX; strlcpy(ifm_name, "10GbE DA (Direct Attached)", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_CX4: vdev->ifm_optics = IFM_10G_CX4; strlcpy(ifm_name, "10GbE CX4", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_BASE_T: #if __FreeBSD_version >= 800000 vdev->ifm_optics = IFM_10G_T; #endif strlcpy(ifm_name, "10GbE baseT", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_10G_OTHER: strlcpy(ifm_name, "10GbE Other", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_SX: vdev->ifm_optics = IFM_1000_SX; strlcpy(ifm_name, "1GbE SX", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_LX: vdev->ifm_optics = IFM_1000_LX; strlcpy(ifm_name, "1GbE LX", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX: vdev->ifm_optics = IFM_1000_CX; strlcpy(ifm_name, "1GbE CX", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_BASE_T: vdev->ifm_optics = IFM_1000_T; strlcpy(ifm_name, "1GbE baseT", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_DIRECT: strlcpy(ifm_name, "1GbE DA (Direct Attached)", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX4: strlcpy(ifm_name, "1GbE CX4", ifm_len); break; case VXGE_HAL_DEVICE_PMD_TYPE_1G_OTHER: strlcpy(ifm_name, "1GbE Other", ifm_len); break; default: case VXGE_HAL_DEVICE_PMD_TYPE_UNKNOWN: strlcpy(ifm_name, "UNSUP", ifm_len); break; } } u32 vxge_ring_length_get(u32 buffer_mode) { return (VXGE_DEFAULT_RING_BLOCK * vxge_hal_ring_rxds_per_block_get(buffer_mode)); } /* * Removes trailing spaces padded * and NULL terminates strings */ static inline void vxge_null_terminate(char *str, size_t len) { len--; while (*str && (*str != ' ') && (len != 0)) ++str; --len; if (*str) *str = '\0'; } /* * vxge_ioctl * Callback to control the device */ int vxge_ioctl(ifnet_t ifp, u_long command, caddr_t data) { int mask, err = 0; vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; if (!vdev->is_active) return (EBUSY); switch (command) { - /* Set/Get ifnet address 
*/ - case SIOCSIFADDR: - case SIOCGIFADDR: - ether_ioctl(ifp, command, data); - break; - /* Set Interface MTU */ case SIOCSIFMTU: err = vxge_change_mtu(vdev, (unsigned long)ifr->ifr_mtu); break; /* Set Interface Flags */ case SIOCSIFFLAGS: VXGE_DRV_LOCK(vdev); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ vdev->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) vxge_promisc_set(vdev); } else { vxge_init_locked(vdev); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vxge_stop_locked(vdev); } vdev->if_flags = ifp->if_flags; VXGE_DRV_UNLOCK(vdev); break; /* Add/delete multicast address */ case SIOCADDMULTI: case SIOCDELMULTI: break; /* Get/Set Interface Media */ case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &vdev->media, command); break; /* Set Capabilities */ case SIOCSIFCAP: VXGE_DRV_LOCK(vdev); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if ((ifp->if_capenable & IFCAP_TSO) && !(ifp->if_capenable & IFCAP_TXCSUM)) { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "TSO Disabled\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (ifp->if_capenable & IFCAP_TSO) { if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= CSUM_TSO; if_printf(ifp, "TSO Enabled\n"); } else { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "Enable tx checksum offload \ first.\n"); err = EAGAIN; } } else { ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "TSO Disabled\n"); } } if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_MTU) ifp->if_capenable ^= IFCAP_VLAN_MTU; if (mask & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; #if __FreeBSD_version >= 800000 if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; #endif #if defined(VLAN_CAPABILITIES) VLAN_CAPABILITIES(ifp); #endif VXGE_DRV_UNLOCK(vdev); break; case SIOCGPRIVATE_0: VXGE_DRV_LOCK(vdev); err = vxge_ioctl_stats(vdev, ifr); VXGE_DRV_UNLOCK(vdev); break; case SIOCGPRIVATE_1: VXGE_DRV_LOCK(vdev); err = vxge_ioctl_regs(vdev, ifr); VXGE_DRV_UNLOCK(vdev); break; default: err = ether_ioctl(ifp, command, data); break; } return (err); } /* * vxge_ioctl_regs * IOCTL to get registers */ int vxge_ioctl_regs(vxge_dev_t *vdev, struct ifreq *ifr) { u64 value = 0x0; u32 vp_id = 0; u32 offset, reqd_size = 0; int i, err = EINVAL; char *command = ifr_data_get_ptr(ifr); void *reg_info = ifr_data_get_ptr(ifr); vxge_vpath_t *vpath; vxge_hal_status_e status = VXGE_HAL_OK; vxge_hal_mgmt_reg_type_e regs_type; switch (*command) { case vxge_hal_mgmt_reg_type_pcicfgmgmt: if (vdev->is_privilaged) { reqd_size = sizeof(vxge_hal_pcicfgmgmt_reg_t); regs_type = vxge_hal_mgmt_reg_type_pcicfgmgmt; } break; case vxge_hal_mgmt_reg_type_mrpcim: if (vdev->is_privilaged) { reqd_size = sizeof(vxge_hal_mrpcim_reg_t); regs_type = vxge_hal_mgmt_reg_type_mrpcim; } break; case vxge_hal_mgmt_reg_type_srpcim: if (vdev->is_privilaged) { reqd_size = sizeof(vxge_hal_srpcim_reg_t); regs_type = vxge_hal_mgmt_reg_type_srpcim; } break; case vxge_hal_mgmt_reg_type_memrepair: if (vdev->is_privilaged) { /* reqd_size = sizeof(vxge_hal_memrepair_reg_t); */ regs_type = vxge_hal_mgmt_reg_type_memrepair; } break; case vxge_hal_mgmt_reg_type_legacy: reqd_size = 
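		/*
		 * Unlike the pcicfgmgmt/mrpcim/srpcim cases above, the legacy,
		 * toc and common register spaces are read without an
		 * is_privilaged check.
		 */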
sizeof(vxge_hal_legacy_reg_t); regs_type = vxge_hal_mgmt_reg_type_legacy; break; case vxge_hal_mgmt_reg_type_toc: reqd_size = sizeof(vxge_hal_toc_reg_t); regs_type = vxge_hal_mgmt_reg_type_toc; break; case vxge_hal_mgmt_reg_type_common: reqd_size = sizeof(vxge_hal_common_reg_t); regs_type = vxge_hal_mgmt_reg_type_common; break; case vxge_hal_mgmt_reg_type_vpmgmt: reqd_size = sizeof(vxge_hal_vpmgmt_reg_t); regs_type = vxge_hal_mgmt_reg_type_vpmgmt; vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]); vp_id = vpath->vp_id; break; case vxge_hal_mgmt_reg_type_vpath: reqd_size = sizeof(vxge_hal_vpath_reg_t); regs_type = vxge_hal_mgmt_reg_type_vpath; vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]); vp_id = vpath->vp_id; break; case VXGE_GET_VPATH_COUNT: *((u32 *) reg_info) = vdev->no_of_vpath; err = 0; break; default: reqd_size = 0; break; } if (reqd_size) { for (i = 0, offset = 0; offset < reqd_size; i++, offset += 0x0008) { value = 0x0; status = vxge_hal_mgmt_reg_read(vdev->devh, regs_type, vp_id, offset, &value); err = (status != VXGE_HAL_OK) ? EINVAL : 0; if (err == EINVAL) break; *((u64 *) ((u64 *) reg_info + i)) = value; } } return (err); } /* * vxge_ioctl_stats * IOCTL to get statistics */ int vxge_ioctl_stats(vxge_dev_t *vdev, struct ifreq *ifr) { int i, retsize, err = EINVAL; u32 bufsize; vxge_vpath_t *vpath; vxge_bw_info_t *bw_info; vxge_port_info_t *port_info; vxge_drv_stats_t *drv_stat; char *buffer = NULL; char *command = ifr_data_get_ptr(ifr); vxge_hal_status_e status = VXGE_HAL_OK; switch (*command) { case VXGE_GET_PCI_CONF: bufsize = VXGE_STATS_BUFFER_SIZE; buffer = (char *) vxge_mem_alloc(bufsize); if (buffer != NULL) { status = vxge_hal_aux_pci_config_read(vdev->devh, bufsize, buffer, &retsize); if (status == VXGE_HAL_OK) err = copyout(buffer, ifr_data_get_ptr(ifr), retsize); else device_printf(vdev->ndev, "failed pciconfig statistics query\n"); vxge_mem_free(buffer, bufsize); } break; case VXGE_GET_MRPCIM_STATS: if (!vdev->is_privilaged) break; bufsize = VXGE_STATS_BUFFER_SIZE; buffer = (char *) vxge_mem_alloc(bufsize); if (buffer != NULL) { status = vxge_hal_aux_stats_mrpcim_read(vdev->devh, bufsize, buffer, &retsize); if (status == VXGE_HAL_OK) err = copyout(buffer, ifr_data_get_ptr(ifr), retsize); else device_printf(vdev->ndev, "failed mrpcim statistics query\n"); vxge_mem_free(buffer, bufsize); } break; case VXGE_GET_DEVICE_STATS: bufsize = VXGE_STATS_BUFFER_SIZE; buffer = (char *) vxge_mem_alloc(bufsize); if (buffer != NULL) { status = vxge_hal_aux_stats_device_read(vdev->devh, bufsize, buffer, &retsize); if (status == VXGE_HAL_OK) err = copyout(buffer, ifr_data_get_ptr(ifr), retsize); else device_printf(vdev->ndev, "failed device statistics query\n"); vxge_mem_free(buffer, bufsize); } break; case VXGE_GET_DEVICE_HWINFO: bufsize = sizeof(vxge_device_hw_info_t); buffer = (char *) vxge_mem_alloc(bufsize); if (buffer != NULL) { vxge_os_memcpy( &(((vxge_device_hw_info_t *) buffer)->hw_info), &vdev->config.hw_info, sizeof(vxge_hal_device_hw_info_t)); ((vxge_device_hw_info_t *) buffer)->port_mode = vdev->port_mode; ((vxge_device_hw_info_t *) buffer)->port_failure = vdev->port_failure; err = copyout(buffer, ifr_data_get_ptr(ifr), bufsize); if (err != 0) device_printf(vdev->ndev, "failed device hardware info query\n"); vxge_mem_free(buffer, bufsize); } break; case VXGE_GET_DRIVER_STATS: bufsize = sizeof(vxge_drv_stats_t) * vdev->no_of_vpath; drv_stat = (vxge_drv_stats_t *) vxge_mem_alloc(bufsize); if (drv_stat != NULL) { for (i = 0; i < vdev->no_of_vpath; i++) { vpath = 
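				/*
				 * Fold each vpath's LRO counters into its
				 * driver_stats before the snapshot is copied
				 * out to userland.
				 */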
&(vdev->vpaths[i]); vpath->driver_stats.rx_lro_queued += vpath->lro.lro_queued; vpath->driver_stats.rx_lro_flushed += vpath->lro.lro_flushed; vxge_os_memcpy(&drv_stat[i], &(vpath->driver_stats), sizeof(vxge_drv_stats_t)); } err = copyout(drv_stat, ifr_data_get_ptr(ifr), bufsize); if (err != 0) device_printf(vdev->ndev, "failed driver statistics query\n"); vxge_mem_free(drv_stat, bufsize); } break; case VXGE_GET_BANDWIDTH: bw_info = ifr_data_get_ptr(ifr); if ((vdev->config.hw_info.func_id != 0) && (vdev->hw_fw_version < VXGE_FW_VERSION(1, 8, 0))) break; if (vdev->config.hw_info.func_id != 0) bw_info->func_id = vdev->config.hw_info.func_id; status = vxge_bw_priority_get(vdev, bw_info); if (status != VXGE_HAL_OK) break; err = copyout(bw_info, ifr_data_get_ptr(ifr), sizeof(vxge_bw_info_t)); break; case VXGE_SET_BANDWIDTH: if (vdev->is_privilaged) err = vxge_bw_priority_set(vdev, ifr); break; case VXGE_SET_PORT_MODE: if (vdev->is_privilaged) { if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) { port_info = ifr_data_get_ptr(ifr); vdev->config.port_mode = port_info->port_mode; err = vxge_port_mode_update(vdev); if (err != ENXIO) err = VXGE_HAL_FAIL; else { err = VXGE_HAL_OK; device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n"); } } } break; case VXGE_GET_PORT_MODE: if (vdev->is_privilaged) { if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) { port_info = ifr_data_get_ptr(ifr); err = vxge_port_mode_get(vdev, port_info); if (err == VXGE_HAL_OK) { err = copyout(port_info, ifr_data_get_ptr(ifr), sizeof(vxge_port_info_t)); } } } break; default: break; } return (err); } int vxge_bw_priority_config(vxge_dev_t *vdev) { u32 i; int err = EINVAL; for (i = 0; i < vdev->no_of_func; i++) { err = vxge_bw_priority_update(vdev, i, TRUE); if (err != 0) break; } return (err); } int vxge_bw_priority_set(vxge_dev_t *vdev, struct ifreq *ifr) { int err; u32 func_id; vxge_bw_info_t *bw_info; bw_info = ifr_data_get_ptr(ifr); func_id = bw_info->func_id; vdev->config.bw_info[func_id].priority = bw_info->priority; vdev->config.bw_info[func_id].bandwidth = bw_info->bandwidth; err = vxge_bw_priority_update(vdev, func_id, FALSE); return (err); } int vxge_bw_priority_update(vxge_dev_t *vdev, u32 func_id, bool binit) { u32 i, set = 0; u32 bandwidth, priority, vpath_count; u64 vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS]; vxge_hal_device_t *hldev; vxge_hal_vp_config_t *vp_config; vxge_hal_status_e status = VXGE_HAL_OK; hldev = vdev->devh; status = vxge_hal_get_vpath_list(vdev->devh, func_id, vpath_list, &vpath_count); if (status != VXGE_HAL_OK) return (status); for (i = 0; i < vpath_count; i++) { vp_config = &(hldev->config.vp_config[vpath_list[i]]); /* Configure Bandwidth */ if (vdev->config.bw_info[func_id].bandwidth != VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) { set = 1; bandwidth = vdev->config.bw_info[func_id].bandwidth; if (bandwidth < VXGE_HAL_VPATH_BW_LIMIT_MIN || bandwidth > VXGE_HAL_VPATH_BW_LIMIT_MAX) { bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT; } vp_config->bandwidth = bandwidth; } /* * If b/w limiting is enabled on any of the * VFs, then for remaining VFs set the priority to 3 * and b/w limiting to max i.e 10 Gb) */ if (vp_config->bandwidth == VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) vp_config->bandwidth = VXGE_HAL_VPATH_BW_LIMIT_MAX; if (binit && vdev->config.low_latency) { if (func_id == 0) vdev->config.bw_info[func_id].priority = VXGE_DEFAULT_VPATH_PRIORITY_HIGH; } /* Configure Priority */ if (vdev->config.bw_info[func_id].priority != VXGE_HAL_VPATH_PRIORITY_DEFAULT) { set = 1; priority = 
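			/*
			 * A priority outside the HAL min/max range is pushed
			 * back to VXGE_HAL_VPATH_PRIORITY_DEFAULT below.
			 */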
			    vdev->config.bw_info[func_id].priority;

			if (priority < VXGE_HAL_VPATH_PRIORITY_MIN ||
			    priority > VXGE_HAL_VPATH_PRIORITY_MAX) {
				priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
			}
			vp_config->priority = priority;

		} else if (vdev->config.low_latency) {
			set = 1;
			vp_config->priority = VXGE_DEFAULT_VPATH_PRIORITY_LOW;
		}

		if (set == 1) {
			status = vxge_hal_rx_bw_priority_set(vdev->devh,
			    vpath_list[i]);
			if (status != VXGE_HAL_OK)
				break;

			if (vpath_list[i] < VXGE_HAL_TX_BW_VPATH_LIMIT) {
				status = vxge_hal_tx_bw_priority_set(
				    vdev->devh, vpath_list[i]);
				if (status != VXGE_HAL_OK)
					break;
			}
		}
	}

	return ((status == VXGE_HAL_OK) ? 0 : EINVAL);
}

/*
 * vxge_intr_coalesce_tx
 * Changes interrupt coalescing if the interrupts are not within a range
 * Return Value: Nothing
 */
void
vxge_intr_coalesce_tx(vxge_vpath_t *vpath)
{
	u32 timer;

	if (!vpath->tx_intr_coalesce)
		return;

	vpath->tx_interrupts++;
	if (ticks > vpath->tx_ticks + hz/100) {
		vpath->tx_ticks = ticks;
		timer = vpath->tti_rtimer_val;
		if (vpath->tx_interrupts > VXGE_MAX_TX_INTERRUPT_COUNT) {
			if (timer != VXGE_TTI_RTIMER_ADAPT_VAL) {
				vpath->tti_rtimer_val =
				    VXGE_TTI_RTIMER_ADAPT_VAL;
				vxge_hal_vpath_dynamic_tti_rtimer_set(
				    vpath->handle, vpath->tti_rtimer_val);
			}
		} else {
			if (timer != 0) {
				vpath->tti_rtimer_val = 0;
				vxge_hal_vpath_dynamic_tti_rtimer_set(
				    vpath->handle, vpath->tti_rtimer_val);
			}
		}
		vpath->tx_interrupts = 0;
	}
}

/*
 * vxge_intr_coalesce_rx
 * Changes interrupt coalescing if the interrupts are not within a range
 * Return Value: Nothing
 */
void
vxge_intr_coalesce_rx(vxge_vpath_t *vpath)
{
	u32 timer;

	if (!vpath->rx_intr_coalesce)
		return;

	vpath->rx_interrupts++;
	if (ticks > vpath->rx_ticks + hz/100) {
		vpath->rx_ticks = ticks;
		timer = vpath->rti_rtimer_val;
		if (vpath->rx_interrupts > VXGE_MAX_RX_INTERRUPT_COUNT) {
			if (timer != VXGE_RTI_RTIMER_ADAPT_VAL) {
				vpath->rti_rtimer_val =
				    VXGE_RTI_RTIMER_ADAPT_VAL;
				vxge_hal_vpath_dynamic_rti_rtimer_set(
				    vpath->handle, vpath->rti_rtimer_val);
			}
		} else {
			if (timer != 0) {
				vpath->rti_rtimer_val = 0;
				vxge_hal_vpath_dynamic_rti_rtimer_set(
				    vpath->handle, vpath->rti_rtimer_val);
			}
		}
		vpath->rx_interrupts = 0;
	}
}

/*
 * vxge_methods
 * FreeBSD device interface entry points
 */
static device_method_t vxge_methods[] = {
	DEVMETHOD(device_probe, vxge_probe),
	DEVMETHOD(device_attach, vxge_attach),
	DEVMETHOD(device_detach, vxge_detach),
	DEVMETHOD(device_shutdown, vxge_shutdown),
	DEVMETHOD_END
};

static driver_t vxge_driver = {
	"vxge", vxge_methods, sizeof(vxge_dev_t),
};

static devclass_t vxge_devclass;

DRIVER_MODULE(vxge, pci, vxge_driver, vxge_devclass, 0, 0);
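/*
 * Standard newbus glue: vxge_methods maps the generic probe/attach/detach/
 * shutdown entry points onto this driver's handlers, and DRIVER_MODULE()
 * registers the "vxge" driver on the pci bus with a per-device softc of
 * sizeof(vxge_dev_t).
 */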